Package gluon :: Module dal
[hide private]
[frames] | [no frames]

Source Code for Module gluon.dal

    1  #!/bin/env python 
    2  # -*- coding: utf-8 -*- 
    3   
    4  """ 
    5  This file is part of the web2py Web Framework 
    6  Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> 
    7  License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) 
    8   
    9  Thanks to 
   10      * Niall Sweeny <niall.sweeny@fonjax.com> for MS SQL support 
   11      * Marcel Leuthi <mluethi@mlsystems.ch> for Oracle support 
   12      * Denes 
   13      * Chris Clark 
   14      * clach05 
   15      * Denes Lengyel 
   16      * and many others who have contributed to current and previous versions 
   17   
   18  This file contains the DAL support for many relational databases, 
   19  including: 
   20  - SQLite & SpatiaLite 
   21  - MySQL 
   22  - Postgres 
   23  - Firebird 
   24  - Oracle 
   25  - MS SQL 
   26  - DB2 
   27  - Interbase 
   28  - Ingres 
   29  - Informix (9+ and SE) 
   30  - SapDB (experimental) 
   31  - Cubrid (experimental) 
   32  - CouchDB (experimental) 
   33  - MongoDB (in progress) 
   34  - Google:nosql 
   35  - Google:sql 
   36  - Teradata 
   37  - IMAP (experimental) 
   38   
   39  Example of usage: 
   40   
   41  >>> # from dal import DAL, Field 
   42   
   43  ### create DAL connection (and create DB if it doesn't exist) 
   44  >>> db = DAL(('sqlite://storage.sqlite','mysql://a:b@localhost/x'), 
   45  ... folder=None) 
   46   
   47  ### define a table 'person' (create/alter as necessary) 
   48  >>> person = db.define_table('person',Field('name','string')) 
   49   
   50  ### insert a record 
   51  >>> id = person.insert(name='James') 
   52   
   53  ### retrieve it by id 
   54  >>> james = person(id) 
   55   
   56  ### retrieve it by name 
   57  >>> james = person(name='James') 
   58   
   59  ### retrieve it by arbitrary query 
   60  >>> query = (person.name=='James') & (person.name.startswith('J')) 
   61  >>> james = db(query).select(person.ALL)[0] 
   62   
   63  ### update one record 
   64  >>> james.update_record(name='Jim') 
   65  <Row {'id': 1, 'name': 'Jim'}> 
   66   
   67  ### update multiple records by query 
   68  >>> db(person.name.like('J%')).update(name='James') 
   69  1 
   70   
   71  ### delete records by query 
   72  >>> db(person.name.lower() == 'jim').delete() 
   73  0 
   74   
   75  ### retrieve multiple records (rows) 
   76  >>> people = db(person).select(orderby=person.name, 
   77  ... groupby=person.name, limitby=(0,100)) 
   78   
   79  ### further filter them 
   80  >>> james = people.find(lambda row: row.name == 'James').first() 
   81  >>> print james.id, james.name 
   82  1 James 
   83   
   84  ### check aggregates 
   85  >>> counter = person.id.count() 
   86  >>> print db(person).select(counter).first()(counter) 
   87  1 
   88   
   89  ### delete one record 
   90  >>> james.delete_record() 
   91  1 
   92   
   93  ### delete (drop) entire database table 
   94  >>> person.drop() 
   95   
   96  Supported field types: 
   97  id string text boolean integer double decimal password upload 
   98  blob time date datetime 
   99   
  100  Supported DAL URI strings: 
  101  'sqlite://test.db' 
  102  'spatialite://test.db' 
  103  'sqlite:memory' 
  104  'spatialite:memory' 
  105  'jdbc:sqlite://test.db' 
  106  'mysql://root:none@localhost/test' 
  107  'postgres://mdipierro:password@localhost/test' 
  108  'postgres:psycopg2://mdipierro:password@localhost/test' 
  109  'postgres:pg8000://mdipierro:password@localhost/test' 
  110  'jdbc:postgres://mdipierro:none@localhost/test' 
  111  'mssql://web2py:none@A64X2/web2py_test' 
  112  'mssql2://web2py:none@A64X2/web2py_test' # alternate mappings 
  113  'oracle://username:password@database' 
  114  'firebird://user:password@server:3050/database' 
  115  'db2://DSN=dsn;UID=user;PWD=pass' 
  116  'firebird://username:password@hostname/database' 
  117  'firebird_embedded://username:password@c://path' 
  118  'informix://user:password@server:3050/database' 
  119  'informixu://user:password@server:3050/database' # unicode informix 
  120  'ingres://database'  # or use an ODBC connection string, e.g. 'ingres://dsn=dsn_name' 
  121  'google:datastore' # for google app engine datastore 
  122  'google:sql' # for google app engine with sql (mysql compatible) 
  123  'teradata://DSN=dsn;UID=user;PWD=pass; DATABASE=database' # experimental 
  124  'imap://user:password@server:port' # experimental 
  125  'mongodb://user:password@server:port/database' # experimental 
  126   
  127  For more info: 
  128  help(DAL) 
  129  help(Field) 
  130  """ 
  131   
  132  ################################################################################### 
  133  # this file only exposes DAL and Field 
  134  ################################################################################### 
  135   
# Public API of this module; everything else is implementation detail.
__all__ = ['DAL', 'Field']

# Default column lengths used when a Field does not give an explicit `length`.
DEFAULTLENGTH = {'string':512,
                 'password':512,
                 'upload':512,
                 'text':2**15,
                 'blob':2**31}
# Cap on the number of entries kept in the query-timings log
# (presumably consumed elsewhere in this module -- confirm at use site).
TIMINGSSIZE = 100
# Per-platform name of the SpatiaLite extension library loaded for
# SQLite-based GIS support.
SPATIALLIBS = {
    'Windows':'libspatialite',
    'Linux':'libspatialite.so',
    'Darwin':'libspatialite.dylib'
    }
# Connection URI used when DAL() is instantiated without one.
DEFAULT_URI = 'sqlite://dummy.db'
  150   
  151  import re 
  152  import sys 
  153  import locale 
  154  import os 
  155  import types 
  156  import datetime 
  157  import threading 
  158  import time 
  159  import csv 
  160  import cgi 
  161  import copy 
  162  import socket 
  163  import logging 
  164  import base64 
  165  import shutil 
  166  import marshal 
  167  import decimal 
  168  import struct 
  169  import urllib 
  170  import hashlib 
  171  import uuid 
  172  import glob 
  173  import traceback 
  174  import platform 
  175   
  176  PYTHON_VERSION = sys.version_info[:3] 
  177  if PYTHON_VERSION[0] == 2: 
  178      import cPickle as pickle 
  179      import cStringIO as StringIO 
  180      import copy_reg as copyreg 
  181      hashlib_md5 = hashlib.md5 
  182      bytes, unicode = str, unicode 
  183  else: 
  184      import pickle 
  185      from io import StringIO as StringIO 
  186      import copyreg 
  187      long = int 
  188      hashlib_md5 = lambda s: hashlib.md5(bytes(s,'utf8')) 
  189      bytes, unicode = bytes, str 
  190   
  191  if PYTHON_VERSION[:2] < (2, 7): 
  192      from gluon.contrib.ordereddict import OrderedDict 
  193  else: 
  194      from collections import OrderedDict 
  195   
  196   
  197  CALLABLETYPES = (types.LambdaType, types.FunctionType, 
  198                   types.BuiltinFunctionType, 
  199                   types.MethodType, types.BuiltinMethodType) 
  200   
  201  TABLE_ARGS = set( 
  202      ('migrate','primarykey','fake_migrate','format','redefine', 
  203       'singular','plural','trigger_name','sequence_name','fields', 
  204       'common_filter','polymodel','table_class','on_define','rname')) 
  205   
  206  SELECT_ARGS = set( 
  207      ('orderby', 'groupby', 'limitby','required', 'cache', 'left', 
  208       'distinct', 'having', 'join','for_update', 'processor','cacheable', 'orderby_on_limitby')) 
  209   
  210  ogetattr = object.__getattribute__ 
  211  osetattr = object.__setattr__ 
  212  exists = os.path.exists 
  213  pjoin = os.path.join 
  214   
  215  ################################################################################### 
  216  # following checks allow the use of dal without web2py, as a standalone module 
  217  ################################################################################### 
  218  try: 
  219      from gluon.utils import web2py_uuid 
  220  except (ImportError, SystemError): 
  221      import uuid 
222 - def web2py_uuid(): return str(uuid.uuid4())
223 224 try: 225 import portalocker 226 have_portalocker = True 227 except ImportError: 228 have_portalocker = False 229 230 try: 231 from gluon import serializers 232 have_serializers = True 233 except ImportError: 234 have_serializers = False 235 try: 236 import json as simplejson 237 except ImportError: 238 try: 239 import gluon.contrib.simplejson as simplejson 240 except ImportError: 241 simplejson = None 242 243 LOGGER = logging.getLogger("web2py.dal") 244 DEFAULT = lambda:0 245 246 GLOBAL_LOCKER = threading.RLock() 247 THREAD_LOCAL = threading.local() 248 249 # internal representation of tables with field 250 # <table>.<field>, tables and fields may only be [a-zA-Z0-9_] 251 252 REGEX_TYPE = re.compile('^([\w\_\:]+)') 253 REGEX_DBNAME = re.compile('^(\w+)(\:\w+)*') 254 REGEX_W = re.compile('^\w+$') 255 REGEX_TABLE_DOT_FIELD = re.compile('^(\w+)\.(\w+)$') 256 REGEX_UPLOAD_PATTERN = re.compile('(?P<table>[\w\-]+)\.(?P<field>[\w\-]+)\.(?P<uuidkey>[\w\-]+)(\.(?P<name>\w+))?\.\w+$') 257 REGEX_CLEANUP_FN = re.compile('[\'"\s;]+') 258 REGEX_UNPACK = re.compile('(?<!\|)\|(?!\|)') 259 REGEX_PYTHON_KEYWORDS = re.compile('^(and|del|from|not|while|as|elif|global|or|with|assert|else|if|pass|yield|break|except|import|print|class|exec|in|raise|continue|finally|is|return|def|for|lambda|try)$') 260 REGEX_SELECT_AS_PARSER = re.compile("\s+AS\s+(\S+)") 261 REGEX_CONST_STRING = re.compile('(\"[^\"]*?\")|(\'[^\']*?\')') 262 REGEX_SEARCH_PATTERN = re.compile('^{[^\.]+\.[^\.]+(\.(lt|gt|le|ge|eq|ne|contains|startswith|year|month|day|hour|minute|second))?(\.not)?}$') 263 REGEX_SQUARE_BRACKETS = re.compile('^.+\[.+\]$') 264 REGEX_STORE_PATTERN = re.compile('\.(?P<e>\w{1,5})$') 265 REGEX_QUOTES = re.compile("'[^']*'") 266 REGEX_ALPHANUMERIC = re.compile('^[0-9a-zA-Z]\w*$') 267 REGEX_PASSWORD = re.compile('\://([^:@]*)\:') 268 REGEX_NOPASSWD = re.compile('\/\/[\w\.\-]+[\:\/](.+)(?=@)') # was '(?<=[\:\/])([^:@/]+)(?=@.+)' 269 270 # list of drivers will be built on the fly 271 # and lists 
only what is available 272 DRIVERS = [] 273 274 try: 275 from new import classobj 276 from google.appengine.ext import db as gae 277 from google.appengine.ext import ndb 278 from google.appengine.api import namespace_manager, rdbms 279 from google.appengine.api.datastore_types import Key ### for belongs on ID 280 from google.appengine.ext.db.polymodel import PolyModel 281 from google.appengine.ext.ndb.polymodel import PolyModel as NDBPolyModel 282 DRIVERS.append('google') 283 except ImportError: 284 pass 285 286 if not 'google' in DRIVERS: 287 288 try: 289 from pysqlite2 import dbapi2 as sqlite2 290 DRIVERS.append('SQLite(sqlite2)') 291 except ImportError: 292 LOGGER.debug('no SQLite drivers pysqlite2.dbapi2') 293 294 try: 295 from sqlite3 import dbapi2 as sqlite3 296 DRIVERS.append('SQLite(sqlite3)') 297 except ImportError: 298 LOGGER.debug('no SQLite drivers sqlite3') 299 300 try: 301 # first try contrib driver, then from site-packages (if installed) 302 try: 303 import gluon.contrib.pymysql as pymysql 304 # monkeypatch pymysql because they havent fixed the bug: 305 # https://github.com/petehunt/PyMySQL/issues/86 306 pymysql.ESCAPE_REGEX = re.compile("'") 307 pymysql.ESCAPE_MAP = {"'": "''"} 308 # end monkeypatch 309 except ImportError: 310 import pymysql 311 DRIVERS.append('MySQL(pymysql)') 312 except ImportError: 313 LOGGER.debug('no MySQL driver pymysql') 314 315 try: 316 import MySQLdb 317 DRIVERS.append('MySQL(MySQLdb)') 318 except ImportError: 319 LOGGER.debug('no MySQL driver MySQLDB') 320 321 try: 322 import mysql.connector as mysqlconnector 323 DRIVERS.append("MySQL(mysqlconnector)") 324 except ImportError: 325 LOGGER.debug("no driver mysql.connector") 326 327 try: 328 import psycopg2 329 from psycopg2.extensions import adapt as psycopg2_adapt 330 DRIVERS.append('PostgreSQL(psycopg2)') 331 except ImportError: 332 LOGGER.debug('no PostgreSQL driver psycopg2') 333 334 try: 335 # first try contrib driver, then from site-packages (if installed) 336 try: 337 
import gluon.contrib.pg8000.dbapi as pg8000 338 except ImportError: 339 import pg8000.dbapi as pg8000 340 DRIVERS.append('PostgreSQL(pg8000)') 341 except ImportError: 342 LOGGER.debug('no PostgreSQL driver pg8000') 343 344 try: 345 import cx_Oracle 346 DRIVERS.append('Oracle(cx_Oracle)') 347 except ImportError: 348 LOGGER.debug('no Oracle driver cx_Oracle') 349 350 try: 351 try: 352 import pyodbc 353 except ImportError: 354 try: 355 import gluon.contrib.pypyodbc as pyodbc 356 except Exception, e: 357 raise ImportError(str(e)) 358 DRIVERS.append('MSSQL(pyodbc)') 359 DRIVERS.append('DB2(pyodbc)') 360 DRIVERS.append('Teradata(pyodbc)') 361 DRIVERS.append('Ingres(pyodbc)') 362 except ImportError: 363 LOGGER.debug('no MSSQL/DB2/Teradata/Ingres driver pyodbc') 364 365 try: 366 import Sybase 367 DRIVERS.append('Sybase(Sybase)') 368 except ImportError: 369 LOGGER.debug('no Sybase driver') 370 371 try: 372 import kinterbasdb 373 DRIVERS.append('Interbase(kinterbasdb)') 374 DRIVERS.append('Firebird(kinterbasdb)') 375 except ImportError: 376 LOGGER.debug('no Firebird/Interbase driver kinterbasdb') 377 378 try: 379 import fdb 380 DRIVERS.append('Firebird(fdb)') 381 except ImportError: 382 LOGGER.debug('no Firebird driver fdb') 383 ##### 384 try: 385 import firebirdsql 386 DRIVERS.append('Firebird(firebirdsql)') 387 except ImportError: 388 LOGGER.debug('no Firebird driver firebirdsql') 389 390 try: 391 import informixdb 392 DRIVERS.append('Informix(informixdb)') 393 LOGGER.warning('Informix support is experimental') 394 except ImportError: 395 LOGGER.debug('no Informix driver informixdb') 396 397 try: 398 import sapdb 399 DRIVERS.append('SQL(sapdb)') 400 LOGGER.warning('SAPDB support is experimental') 401 except ImportError: 402 LOGGER.debug('no SAP driver sapdb') 403 404 try: 405 import cubriddb 406 DRIVERS.append('Cubrid(cubriddb)') 407 LOGGER.warning('Cubrid support is experimental') 408 except ImportError: 409 LOGGER.debug('no Cubrid driver cubriddb') 410 411 try: 412 from 
com.ziclix.python.sql import zxJDBC 413 import java.sql 414 # Try sqlite jdbc driver from http://www.zentus.com/sqlitejdbc/ 415 from org.sqlite import JDBC # required by java.sql; ensure we have it 416 zxJDBC_sqlite = java.sql.DriverManager 417 DRIVERS.append('PostgreSQL(zxJDBC)') 418 DRIVERS.append('SQLite(zxJDBC)') 419 LOGGER.warning('zxJDBC support is experimental') 420 is_jdbc = True 421 except ImportError: 422 LOGGER.debug('no SQLite/PostgreSQL driver zxJDBC') 423 is_jdbc = False 424 425 try: 426 import couchdb 427 DRIVERS.append('CouchDB(couchdb)') 428 except ImportError: 429 LOGGER.debug('no Couchdb driver couchdb') 430 431 try: 432 import pymongo 433 DRIVERS.append('MongoDB(pymongo)') 434 except: 435 LOGGER.debug('no MongoDB driver pymongo') 436 437 try: 438 import imaplib 439 DRIVERS.append('IMAP(imaplib)') 440 except: 441 LOGGER.debug('no IMAP driver imaplib') 442 443 PLURALIZE_RULES = [ 444 (re.compile('child$'), re.compile('child$'), 'children'), 445 (re.compile('oot$'), re.compile('oot$'), 'eet'), 446 (re.compile('ooth$'), re.compile('ooth$'), 'eeth'), 447 (re.compile('l[eo]af$'), re.compile('l([eo])af$'), 'l\\1aves'), 448 (re.compile('sis$'), re.compile('sis$'), 'ses'), 449 (re.compile('man$'), re.compile('man$'), 'men'), 450 (re.compile('ife$'), re.compile('ife$'), 'ives'), 451 (re.compile('eau$'), re.compile('eau$'), 'eaux'), 452 (re.compile('lf$'), re.compile('lf$'), 'lves'), 453 (re.compile('[sxz]$'), re.compile('$'), 'es'), 454 (re.compile('[^aeioudgkprt]h$'), re.compile('$'), 'es'), 455 (re.compile('(qu|[^aeiou])y$'), re.compile('y$'), 'ies'), 456 (re.compile('$'), re.compile('$'), 's'), 457 ]
def pluralize(singular, rules=PLURALIZE_RULES):
    """Return the plural form of *singular*.

    Scans `rules` (match-pattern, substitution-pattern, replacement)
    in order and returns the first non-empty substitution result; the
    final catch-all rule simply appends 's'.
    """
    for re_search, re_sub, replace in rules:
        candidate = re_search.search(singular) and re_sub.sub(replace, singular)
        if candidate:
            return candidate
464
def hide_password(uri):
    """Mask the password portion of a connection URI with '******'.

    Accepts either a single URI string or a list/tuple of URIs,
    which is processed element by element.
    """
    if not isinstance(uri, (list, tuple)):
        return REGEX_NOPASSWD.sub('******', uri)
    return [hide_password(item) for item in uri]
469
def OR(a,b):
    """Combine two query/expression objects with the | (OR) operator."""
    combined = a | b
    return combined
472
def AND(a,b):
    """Combine two query/expression objects with the & (AND) operator."""
    combined = a & b
    return combined
475
476 -def IDENTITY(x): return x
477
def varquote_aux(name, quotestr='%s'):
    """Wrap `name` with `quotestr` unless it is a plain \\w+ identifier."""
    if REGEX_W.match(name):
        return name
    return quotestr % name
480
def quote_keyword(a, keyword='timestamp'):
    """Quote uses of the reserved word `keyword` as a column name in the
    SQL fragment `a`, e.g. 'tbl.timestamp' -> 'tbl."timestamp"'.

    BUGFIX: the original pattern was compiled from the literal text
    '\\.keyword(?=\\w)', so the `keyword` argument was never used for
    matching (and the lookahead required a trailing word character,
    which would corrupt longer identifiers). The pattern now
    interpolates the keyword and matches it only as a whole word.
    """
    regex = re.compile(r'\.%s\b' % re.escape(keyword))
    return regex.sub('."%s"' % keyword, a)
485 486 if 'google' in DRIVERS: 487 488 is_jdbc = False
489 490 - class GAEDecimalProperty(gae.Property):
491 """ 492 GAE decimal implementation 493 """ 494 data_type = decimal.Decimal 495
496 - def __init__(self, precision, scale, **kwargs):
497 super(GAEDecimalProperty, self).__init__(self, **kwargs) 498 d = '1.' 499 for x in range(scale): 500 d += '0' 501 self.round = decimal.Decimal(d)
502
503 - def get_value_for_datastore(self, model_instance):
504 value = super(GAEDecimalProperty, self)\ 505 .get_value_for_datastore(model_instance) 506 if value is None or value == '': 507 return None 508 else: 509 return str(value)
510
511 - def make_value_from_datastore(self, value):
512 if value is None or value == '': 513 return None 514 else: 515 return decimal.Decimal(value).quantize(self.round)
516
517 - def validate(self, value):
518 value = super(GAEDecimalProperty, self).validate(value) 519 if value is None or isinstance(value, decimal.Decimal): 520 return value 521 elif isinstance(value, basestring): 522 return decimal.Decimal(value) 523 raise gae.BadValueError("Property %s must be a Decimal or string."\ 524 % self.name)
525
    #TODO Needs more testing
    class NDBDecimalProperty(ndb.StringProperty):
        """
        NDB decimal implementation

        Stores decimal.Decimal values as strings in the datastore;
        `scale` fixes the digits kept after the decimal point and
        `precision` is accepted for signature-compatibility only.
        """
        data_type = decimal.Decimal

        def __init__(self, precision, scale, **kwargs):
            # NOTE(review): ndb.StringProperty.__init__ is never invoked and
            # **kwargs are silently dropped -- confirm this is intentional.
            # Builds a quantizer such as Decimal('1.00') for scale=2.
            d = '1.'
            for x in range(scale):
                d += '0'
            self.round = decimal.Decimal(d)

        def _to_base_type(self, value):
            # serialize for the datastore; empty/None stored as None
            if value is None or value == '':
                return None
            else:
                return str(value)

        def _from_base_type(self, value):
            # deserialize and quantize to the configured scale
            if value is None or value == '':
                return None
            else:
                return decimal.Decimal(value).quantize(self.round)

        def _validate(self, value):
            # accept Decimal or string values only
            if value is None or isinstance(value, decimal.Decimal):
                return value
            elif isinstance(value, basestring):
                return decimal.Decimal(value)
            raise TypeError("Property %s must be a Decimal or string."\
                            % self._name)
558
559 ################################################################################### 560 # class that handles connection pooling (all adapters are derived from this one) 561 ################################################################################### 562 563 -class ConnectionPool(object):
564 565 POOLS = {} 566 check_active_connection = True 567 568 @staticmethod
569 - def set_folder(folder):
571 572 # ## this allows gluon to commit/rollback all dbs in this thread 573
574 - def close(self,action='commit',really=True):
575 if action: 576 if callable(action): 577 action(self) 578 else: 579 getattr(self, action)() 580 # ## if you want pools, recycle this connection 581 if self.pool_size: 582 GLOBAL_LOCKER.acquire() 583 pool = ConnectionPool.POOLS[self.uri] 584 if len(pool) < self.pool_size: 585 pool.append(self.connection) 586 really = False 587 GLOBAL_LOCKER.release() 588 if really: 589 self.close_connection() 590 self.connection = None
591 592 @staticmethod
593 - def close_all_instances(action):
594 """ to close cleanly databases in a multithreaded environment """ 595 dbs = getattr(THREAD_LOCAL,'db_instances',{}).items() 596 for db_uid, db_group in dbs: 597 for db in db_group: 598 if hasattr(db,'_adapter'): 599 db._adapter.close(action) 600 getattr(THREAD_LOCAL,'db_instances',{}).clear() 601 getattr(THREAD_LOCAL,'db_instances_zombie',{}).clear() 602 if callable(action): 603 action(None) 604 return
605
    def find_or_make_work_folder(self):
        """ this actually does not make the folder. it has to be there """
        # per-thread working folder (presumably set via set_folder();
        # confirm) -- empty string when none was configured
        self.folder = getattr(THREAD_LOCAL,'folder','')

        # adapters that store migration metadata inside the database keep
        # a cwd-relative folder so the recorded names stay portable
        if (os.path.isabs(self.folder) and
            isinstance(self, UseDatabaseStoredFile) and
            self.folder.startswith(os.getcwd())):
            self.folder = os.path.relpath(self.folder, os.getcwd())

        # Creating the folder if it does not exist
        # NOTE(review): dead code -- the `if False` guard deliberately
        # disables folder creation (see docstring); kept for reference.
        if False and self.folder and not exists(self.folder):
            os.mkdir(self.folder)
618
619 - def after_connection_hook(self):
620 """hook for the after_connection parameter""" 621 if callable(self._after_connection): 622 self._after_connection(self) 623 self.after_connection()
624
625 - def after_connection(self):
626 """ this it is supposed to be overloaded by adapters""" 627 pass
628
    def reconnect(self, f=None, cursor=True):
        """
        this function defines: self.connection and self.cursor
        (iff cursor is True)
        if self.pool_size>0 it will try pull the connection from the pool
        if the connection is not active (closed by db server) it will loop
        if not self.pool_size or no active connections in pool makes a new one
        """
        if getattr(self,'connection', None) != None:
            return  # already connected: nothing to do
        if f is None:
            f = self.connector  # adapter-specific connect function

        # if not hasattr(self, "driver") or self.driver is None:
        #     LOGGER.debug("Skipping connection since there's no driver")
        #     return

        if not self.pool_size:
            # pooling disabled: always open a fresh connection
            self.connection = f()
            self.cursor = cursor and self.connection.cursor()
        else:
            uri = self.uri
            POOLS = ConnectionPool.POOLS
            while True:
                GLOBAL_LOCKER.acquire()
                if not uri in POOLS:
                    POOLS[uri] = []
                if POOLS[uri]:
                    # reuse a pooled connection (lock released before probing)
                    self.connection = POOLS[uri].pop()
                    GLOBAL_LOCKER.release()
                    self.cursor = cursor and self.connection.cursor()
                    try:
                        # probe liveness; the server may have dropped it
                        if self.cursor and self.check_active_connection:
                            self.execute('SELECT 1;')
                        break
                    except:
                        # stale connection: discard it and loop for another
                        pass
                else:
                    # pool empty: create a brand new connection
                    GLOBAL_LOCKER.release()
                    self.connection = f()
                    self.cursor = cursor and self.connection.cursor()
                    break
        self.after_connection_hook()
672
###################################################################################
# this is a generic adapter that does nothing; all others are derived from this one
###################################################################################

class BaseAdapter(ConnectionPool):
    # whether the backend has a native JSON column type (base default)
    native_json = False
    # DB-API driver module and its name, resolved by find_driver()
    driver = None
    driver_name = None
    drivers = () # list of drivers from which to pick
    connection = None
    # some backends require an explicit commit after ALTER TABLE
    commit_on_alter_table = False
    support_distributed_transaction = False
    # if True, uploaded files are stored in blob columns
    uploads_in_blob = False
    can_select_for_update = True
    dbpath = None
    folder = None

    # literal representations of booleans in this backend's SQL
    TRUE = 'T'
    FALSE = 'F'
    # separator between date and time parts in datetime literals
    T_SEP = ' '
    # template used to quote identifiers
    QUOTE_TEMPLATE = '"%s"'

    # map of DAL field types to this backend's SQL column types;
    # %(...)s placeholders are filled in during create_table()
    types = {
        'boolean': 'CHAR(1)',
        'string': 'CHAR(%(length)s)',
        'text': 'TEXT',
        'json': 'TEXT',
        'password': 'CHAR(%(length)s)',
        'blob': 'BLOB',
        'upload': 'CHAR(%(length)s)',
        'integer': 'INTEGER',
        'bigint': 'INTEGER',
        'float':'DOUBLE',
        'double': 'DOUBLE',
        'decimal': 'DOUBLE',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INTEGER PRIMARY KEY AUTOINCREMENT',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'TEXT',
        'list:string': 'TEXT',
        'list:reference': 'TEXT',
        # the two below are only used when DAL(...bigint_id=True) and replace 'id','reference'
        'big-id': 'BIGINT PRIMARY KEY AUTOINCREMENT',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }
722 - def isOperationalError(self,exception):
723 if not hasattr(self.driver, "OperationalError"): 724 return None 725 return isinstance(exception, self.driver.OperationalError)
726
727 - def isProgrammingError(self,exception):
728 if not hasattr(self.driver, "ProgrammingError"): 729 return None 730 return isinstance(exception, self.driver.ProgrammingError)
731
732 - def id_query(self, table):
733 pkeys = getattr(table,'_primarykey',None) 734 if pkeys: 735 return table[pkeys[0]] != None 736 else: 737 return table._id != None
738
739 - def adapt(self, obj):
740 return "'%s'" % obj.replace("'", "''")
741
742 - def smart_adapt(self, obj):
743 if isinstance(obj,(int,float)): 744 return str(obj) 745 return self.adapt(str(obj))
746
747 - def file_exists(self, filename):
748 """ 749 to be used ONLY for files that on GAE may not be on filesystem 750 """ 751 return exists(filename)
752
753 - def file_open(self, filename, mode='rb', lock=True):
754 """ 755 to be used ONLY for files that on GAE may not be on filesystem 756 """ 757 if have_portalocker and lock: 758 fileobj = portalocker.LockedFile(filename,mode) 759 else: 760 fileobj = open(filename,mode) 761 return fileobj
762
763 - def file_close(self, fileobj):
764 """ 765 to be used ONLY for files that on GAE may not be on filesystem 766 """ 767 if fileobj: 768 fileobj.close()
769
770 - def file_delete(self, filename):
771 os.unlink(filename)
772
    def find_driver(self,adapter_args,uri=None):
        """Resolve the DB-API driver module for this adapter.

        Resolution order: driver requested in the URI scheme
        (e.g. 'postgres:psycopg2://...'), then adapter_args['driver'],
        then the first entry of self.drivers that imported successfully.
        Raises RuntimeError when the request cannot be satisfied.
        """
        self.adapter_args = adapter_args
        if getattr(self,'driver',None) != None:
            return  # driver already resolved (e.g. by a subclass)
        # driver modules land in this module's globals when their
        # import succeeded at load time
        drivers_available = [driver for driver in self.drivers
                             if driver in globals()]
        if uri:
            items = uri.split('://',1)[0].split(':')
            request_driver = items[1] if len(items)>1 else None
        else:
            request_driver = None
        request_driver = request_driver or adapter_args.get('driver')
        if request_driver:
            if request_driver in drivers_available:
                self.driver_name = request_driver
                self.driver = globals().get(request_driver)
            else:
                raise RuntimeError("driver %s not available" % request_driver)
        elif drivers_available:
            self.driver_name = drivers_available[0]
            self.driver = globals().get(self.driver_name)
        else:
            raise RuntimeError("no driver available %s" % str(self.drivers))
796
    def log(self, message, table=None):
        """ Logs migrations

        It will not log changes if logfile is not specified. Defaults
        to sql.log
        """

        isabs = None
        logfilename = self.adapter_args.get('logfile','sql.log')
        writelog = bool(logfilename)
        if writelog:
            isabs = os.path.isabs(logfilename)

        # only log when the table is subject to migration (table._dbt set)
        # and a working folder is known
        if table and table._dbt and writelog and self.folder:
            if isabs:
                table._loggername = logfilename
            else:
                table._loggername = pjoin(self.folder, logfilename)
            logfile = self.file_open(table._loggername, 'a')
            logfile.write(message)
            self.file_close(logfile)
818 819
    def __init__(self, db,uri,pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={},do_connect=True, after_connection=None):
        # NOTE(review): driver_args/adapter_args are mutable default
        # arguments; they are not mutated in this body, but callers and
        # subclasses should not rely on a fresh dict per call.
        self.db = db
        self.dbengine = "None"  # overridden by concrete adapters
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        # Dummy stands in for a real connection/cursor so the base (no-op)
        # adapter can be driven without a database: any method call returns
        # an empty list and lastrowid is always 1.
        class Dummy(object):
            lastrowid = 1
            def __getattr__(self, value):
                return lambda *a, **b: []
        self.connection = Dummy()
        self.cursor = Dummy()
837 - def sequence_name(self,tablename):
838 return '%s_sequence' % tablename
839
840 - def trigger_name(self,tablename):
841 return '%s_sequence' % tablename
842
843 - def varquote(self,name):
844 return name
845
    def create_table(self, table,
                     migrate=True,
                     fake_migrate=False,
                     polymodel=None):
        """Build (and optionally execute) the CREATE TABLE SQL for `table`.

        Returns the generated SQL string. When `migrate` is enabled the
        method also manages the companion .table migration file: if it does
        not exist the table is actually created (unless `fake_migrate`),
        otherwise the stored field description is compared against the
        current one and migrate_table() is invoked on any difference.
        `migrate` may be a string naming the migration file. `polymodel`
        is used by the GAE adapter only (unused here).
        """
        db = table._db
        fields = []
        # PostGIS geo fields are added after the table has been created
        postcreation_fields = []
        # sql_fields: per-field metadata written to the .table file
        # sql_fields_aux: same, but with real default values (see caveat below)
        sql_fields = {}
        sql_fields_aux = {}
        # table-level foreign keys (multi-column primary key references)
        TFK = {}
        tablename = table._tablename
        sortable = 0
        types = self.types
        for field in table:
            sortable += 1
            field_name = field.name
            field_type = field.type
            if isinstance(field_type,SQLCustomType):
                ftype = field_type.native or field_type.type
            elif field_type.startswith('reference'):
                referenced = field_type[10:].strip()
                if referenced == '.':
                    referenced = tablename
                constraint_name = self.constraint_name(tablename, field_name)
                if not '.' in referenced \
                        and referenced != tablename \
                        and hasattr(table,'_primarykey'):
                    ftype = types['integer']
                else:
                    if hasattr(table,'_primarykey'):
                        rtablename,rfieldname = referenced.split('.')
                        rtable = db[rtablename]
                        rfield = rtable[rfieldname]
                        # must be PK reference or unique
                        if rfieldname in rtable._primarykey or \
                                rfield.unique:
                            ftype = types[rfield.type[:9]] % \
                                dict(length=rfield.length)
                            # multicolumn primary key reference?
                            if not rfield.unique and len(rtable._primarykey)>1:
                                # then it has to be a table level FK
                                if rtablename not in TFK:
                                    TFK[rtablename] = {}
                                TFK[rtablename][rfieldname] = field_name
                            else:
                                ftype = ftype + \
                                    types['reference FK'] % dict(
                                        constraint_name = constraint_name, # should be quoted
                                        foreign_key = '%s (%s)' % (rtablename,
                                                                   rfieldname),
                                        table_name = tablename,
                                        field_name = field._rname or field.name,
                                        on_delete_action=field.ondelete)
                    else:
                        # make a guess here for circular references
                        if referenced in db:
                            id_fieldname = db[referenced]._id.name
                        elif referenced == tablename:
                            id_fieldname = table._id.name
                        else: #make a guess
                            id_fieldname = 'id'
                        #gotcha: the referenced table must be defined before
                        #the referencing one to be able to create the table
                        #Also if it's not recommended, we can still support
                        #references to tablenames without rname to make
                        #migrations and model relationship work also if tables
                        #are not defined in order
                        real_referenced = (
                            (db[referenced]._rname or db[referenced])
                            if referenced == tablename or referenced in db
                            else referenced)

                        ftype = types[field_type[:9]] % dict(
                            index_name = field_name+'__idx',
                            field_name = field._rname or field.name,
                            constraint_name = constraint_name,
                            foreign_key = '%s (%s)' % (real_referenced,
                                                       id_fieldname),
                            on_delete_action=field.ondelete)
            elif field_type.startswith('list:reference'):
                ftype = types[field_type[:14]]
            elif field_type.startswith('decimal'):
                # e.g. 'decimal(10,2)' -> precision=10, scale=2
                precision, scale = map(int,field_type[8:-1].split(','))
                ftype = types[field_type[:7]] % \
                    dict(precision=precision,scale=scale)
            elif field_type.startswith('geo'):
                if not hasattr(self,'srid'):
                    raise RuntimeError('Adapter does not support geometry')
                srid = self.srid
                geotype, parms = field_type[:-1].split('(')
                if not geotype in types:
                    raise SyntaxError(
                        'Field: unknown field type: %s for %s' \
                        % (field_type, field_name))
                ftype = types[geotype]
                if self.dbengine == 'postgres' and geotype == 'geometry':
                    # parameters: schema, srid, dimension
                    dimension = 2 # GIS.dimension ???
                    parms = parms.split(',')
                    if len(parms) == 3:
                        schema, srid, dimension = parms
                    elif len(parms) == 2:
                        schema, srid = parms
                    else:
                        schema = parms[0]
                    ftype = "SELECT AddGeometryColumn ('%%(schema)s', '%%(tablename)s', '%%(fieldname)s', %%(srid)s, '%s', %%(dimension)s);" % types[geotype]
                    ftype = ftype % dict(schema=schema,
                                         tablename=tablename,
                                         fieldname=field_name, srid=srid,
                                         dimension=dimension)
                    postcreation_fields.append(ftype)
            elif not field_type in types:
                raise SyntaxError('Field: unknown field type: %s for %s' % \
                    (field_type, field_name))
            else:
                ftype = types[field_type]\
                    % dict(length=field.length)
            # append column qualifiers (id/reference types carry their own)
            if not field_type.startswith('id') and \
                    not field_type.startswith('reference'):
                if field.notnull:
                    ftype += ' NOT NULL'
                else:
                    ftype += self.ALLOW_NULL()
                if field.unique:
                    ftype += ' UNIQUE'
                if field.custom_qualifier:
                    ftype += ' %s' % field.custom_qualifier

            # add to list of fields
            sql_fields[field_name] = dict(
                length=field.length,
                unique=field.unique,
                notnull=field.notnull,
                sortable=sortable,
                type=str(field_type),
                sql=ftype)

            if field.notnull and not field.default is None:
                # Caveat: sql_fields and sql_fields_aux
                # differ for default values.
                # sql_fields is used to trigger migrations and sql_fields_aux
                # is used for create tables.
                # The reason is that we do not want to trigger
                # a migration simply because a default value changes.
                not_null = self.NOT_NULL(field.default, field_type)
                ftype = ftype.replace('NOT NULL', not_null)
            sql_fields_aux[field_name] = dict(sql=ftype)
            # Postgres - PostGIS:
            # geometry fields are added after the table has been created, not now
            if not (self.dbengine == 'postgres' and \
                    field_type.startswith('geom')):
                #fetch the rname if it's there
                field_rname = "%s" % (field._rname or field_name)
                fields.append('%s %s' % (field_rname, ftype))
        other = ';'

        # backend-specific extensions to fields
        if self.dbengine == 'mysql':
            if not hasattr(table, "_primarykey"):
                fields.append('PRIMARY KEY(%s)' % (table._id.name or table._id._rname))
            other = ' ENGINE=InnoDB CHARACTER SET utf8;'

        fields = ',\n    '.join(fields)
        # emit table-level foreign keys collected above
        for rtablename in TFK:
            rfields = TFK[rtablename]
            pkeys = db[rtablename]._primarykey
            fkeys = [ rfields[k] for k in pkeys ]
            fields = fields + ',\n    ' + \
                types['reference TFK'] % dict(
                    table_name = tablename,
                    field_name=', '.join(fkeys),
                    foreign_table = rtablename,
                    foreign_key = ', '.join(pkeys),
                    on_delete_action = field.ondelete)
        #if there's a _rname, let's use that instead
        table_rname = table._rname or tablename

        if getattr(table,'_primarykey',None):
            query = "CREATE TABLE %s(\n    %s,\n    %s) %s" % \
                (table_rname, fields,
                 self.PRIMARY_KEY(', '.join(table._primarykey)),other)
        else:
            query = "CREATE TABLE %s(\n    %s\n)%s" % \
                (table_rname, fields, other)

        # migration metadata lives next to the sqlite file, or in self.folder
        if self.uri.startswith('sqlite:///') \
                or self.uri.startswith('spatialite:///'):
            path_encoding = sys.getfilesystemencoding() \
                or locale.getdefaultlocale()[1] or 'utf8'
            dbpath = self.uri[9:self.uri.rfind('/')]\
                .decode('utf8').encode(path_encoding)
        else:
            dbpath = self.folder

        if not migrate:
            return query
        elif self.uri.startswith('sqlite:memory')\
                or self.uri.startswith('spatialite:memory'):
            table._dbt = None
        elif isinstance(migrate, str):
            table._dbt = pjoin(dbpath, migrate)
        else:
            table._dbt = pjoin(
                dbpath, '%s_%s.table' % (table._db._uri_hash, tablename))

        if not table._dbt or not self.file_exists(table._dbt):
            # no migration record yet: actually create the table
            if table._dbt:
                self.log('timestamp: %s\n%s\n'
                         % (datetime.datetime.today().isoformat(),
                            query), table)
            if not fake_migrate:
                self.create_sequence_and_triggers(query,table)
                table._db.commit()
                # Postgres geom fields are added now,
                # after the table has been created
                for query in postcreation_fields:
                    self.execute(query)
                    table._db.commit()
            if table._dbt:
                tfile = self.file_open(table._dbt, 'w')
                pickle.dump(sql_fields, tfile)
                self.file_close(tfile)
                if fake_migrate:
                    self.log('faked!\n', table)
                else:
                    self.log('success!\n', table)
        else:
            # migration record exists: diff it against the current definition
            tfile = self.file_open(table._dbt, 'r')
            try:
                sql_fields_old = pickle.load(tfile)
            except EOFError:
                self.file_close(tfile)
                raise RuntimeError('File %s appears corrupted' % table._dbt)
            self.file_close(tfile)
            if sql_fields != sql_fields_old:
                self.migrate_table(
                    table,
                    sql_fields, sql_fields_old,
                    sql_fields_aux, None,
                    fake_migrate=fake_migrate
                )
        return query
1089
def migrate_table(
    self,
    table,
    sql_fields,
    sql_fields_old,
    sql_fields_aux,
    logfile,
    fake_migrate=False,
    ):
    """Diff the table's field metadata against the stored .table file
    and issue the ALTER TABLE statements needed to reconcile them.

    sql_fields     -- metadata for the model as currently defined
    sql_fields_old -- metadata previously pickled in the .table file
    sql_fields_aux -- like sql_fields but with CREATE-time SQL/defaults
    logfile        -- deprecated (logging moved to adapter.log method)
    fake_migrate   -- log the statements without executing them

    Fixes over the previous revision:
    * the DropGeometryColumn statement was built as ``"A" + "B" % d`` and,
      since % binds tighter than +, the '%(schema)s' placeholder in the
      first fragment was never substituted, producing broken SQL; the
      format is now applied to the whole string.
    * dict iteration uses .items()/list() so the code also runs on
      Python 3 (behavior on Python 2 is unchanged).
    """
    db = table._db
    db._migrated.append(table._tablename)
    tablename = table._tablename

    def fix(item):
        # normalize legacy metadata: plain SQL strings -> dicts
        k, v = item
        if not isinstance(v, dict):
            v = dict(type='unknown', sql=v)
        return k.lower(), v

    # lower-case all field names so a mere case change does not
    # trigger a migration
    sql_fields = dict(map(fix, sql_fields.items()))
    sql_fields_old = dict(map(fix, sql_fields_old.items()))
    sql_fields_aux = dict(map(fix, sql_fields_aux.items()))
    if db._debug:
        logging.debug('migrating %s to %s' % (sql_fields_old, sql_fields))

    keys = list(sql_fields)
    for key in sql_fields_old:
        if not key in keys:
            keys.append(key)
    new_add = self.concat_add(tablename)

    metadata_change = False
    sql_fields_current = copy.copy(sql_fields_old)
    for key in keys:
        query = None
        if not key in sql_fields_old:
            # new field: ADD COLUMN (postgres geometry columns carry
            # their own AddGeometryColumn statement in 'sql')
            sql_fields_current[key] = sql_fields[key]
            if self.dbengine in ('postgres',) and \
               sql_fields[key]['type'].startswith('geometry'):
                # 'sql' == ftype in sql
                query = [sql_fields[key]['sql']]
            else:
                query = ['ALTER TABLE %s ADD %s %s;' % \
                         (tablename, key,
                          sql_fields_aux[key]['sql'].replace(', ', new_add))]
            metadata_change = True
        elif self.dbengine in ('sqlite', 'spatialite'):
            # sqlite cannot drop/alter columns: only update the metadata
            if key in sql_fields:
                sql_fields_current[key] = sql_fields[key]
            metadata_change = True
        elif not key in sql_fields:
            # removed field: DROP COLUMN (dialect-specific syntax)
            del sql_fields_current[key]
            ftype = sql_fields_old[key]['type']
            if (self.dbengine in ('postgres',) and
                ftype.startswith('geometry')):
                geotype, parms = ftype[:-1].split('(')
                schema = parms.split(',')[0]
                # fixed: apply % to the whole statement so that
                # %(schema)s is actually substituted
                query = ["SELECT DropGeometryColumn ('%(schema)s', "
                         "'%(table)s', '%(field)s');" %
                         dict(schema=schema, table=tablename, field=key)]
            elif self.dbengine in ('firebird',):
                query = ['ALTER TABLE %s DROP %s;' % (tablename, key)]
            else:
                query = ['ALTER TABLE %s DROP COLUMN %s;' %
                         (tablename, key)]
            metadata_change = True
        elif sql_fields[key]['sql'] != sql_fields_old[key]['sql'] \
              and not (key in table.fields and
                       isinstance(table[key].type, SQLCustomType)) \
              and not sql_fields[key]['type'].startswith('reference')\
              and not sql_fields[key]['type'].startswith('double')\
              and not sql_fields[key]['type'].startswith('id'):
            # changed column definition: copy the data through a
            # temporary __tmp column, then rebuild the original column
            sql_fields_current[key] = sql_fields[key]
            t = tablename
            tt = sql_fields_aux[key]['sql'].replace(', ', new_add)
            if self.dbengine in ('firebird',):
                drop_expr = 'ALTER TABLE %s DROP %s;'
            else:
                drop_expr = 'ALTER TABLE %s DROP COLUMN %s;'
            key_tmp = key + '__tmp'
            query = ['ALTER TABLE %s ADD %s %s;' % (t, key_tmp, tt),
                     'UPDATE %s SET %s=%s;' % (t, key_tmp, key),
                     drop_expr % (t, key),
                     'ALTER TABLE %s ADD %s %s;' % (t, key, tt),
                     'UPDATE %s SET %s=%s;' % (t, key, key_tmp),
                     drop_expr % (t, key_tmp)]
            metadata_change = True
        elif sql_fields[key]['type'] != sql_fields_old[key]['type']:
            # only the logical type changed: update metadata only
            sql_fields_current[key] = sql_fields[key]
            metadata_change = True

        if query:
            self.log('timestamp: %s\n'
                     % datetime.datetime.today().isoformat(), table)
            db['_lastsql'] = '\n'.join(query)
            for sub_query in query:
                self.log(sub_query + '\n', table)
                if fake_migrate:
                    if db._adapter.commit_on_alter_table:
                        self.save_dbt(table, sql_fields_current)
                    self.log('faked!\n', table)
                else:
                    self.execute(sub_query)
                    # Caveat: mysql, oracle and firebird
                    # do not allow multiple alter table
                    # in one transaction so we must commit
                    # partial transactions and
                    # update table._dbt after alter table.
                    if db._adapter.commit_on_alter_table:
                        db.commit()
                        self.save_dbt(table, sql_fields_current)
                        self.log('success!\n', table)
        elif metadata_change:
            self.save_dbt(table, sql_fields_current)

    if metadata_change and not (query and db._adapter.commit_on_alter_table):
        db.commit()
        self.save_dbt(table, sql_fields_current)
        self.log('success!\n', table)
def save_dbt(self, table, sql_fields_current):
    """Persist the current field metadata into the table's .table file."""
    handle = self.file_open(table._dbt, 'w')
    pickle.dump(sql_fields_current, handle)
    self.file_close(handle)
1217
def LOWER(self, first):
    """Render the SQL LOWER() function around the expanded operand."""
    return 'LOWER({0})'.format(self.expand(first))
1220
def UPPER(self, first):
    """Render the SQL UPPER() function around the expanded operand."""
    return 'UPPER({0})'.format(self.expand(first))
1223
def COUNT(self, first, distinct=None):
    """Render COUNT(expr), or COUNT(DISTINCT expr) when distinct is truthy."""
    inner = self.expand(first)
    if distinct:
        return 'COUNT(DISTINCT %s)' % inner
    return 'COUNT(%s)' % inner
1227
def EXTRACT(self, first, what):
    """Render EXTRACT(<part> FROM <expr>) for date/time components."""
    return 'EXTRACT({0} FROM {1})'.format(what, self.expand(first))
1230
def EPOCH(self, first):
    """Shortcut for EXTRACT(epoch FROM expr)."""
    return self.EXTRACT(first, 'epoch')
1233
def LENGTH(self, first):
    """Render the SQL LENGTH() function around the expanded operand."""
    return 'LENGTH({0})'.format(self.expand(first))
1236
def AGGREGATE(self, first, what):
    """Render an arbitrary aggregate call, e.g. MAX(expr)."""
    return '{0}({1})'.format(what, self.expand(first))
1239
def JOIN(self):
    """Keyword used for inner joins; adapters may override."""
    return 'JOIN'
1242
def LEFT_JOIN(self):
    """Keyword used for left outer joins; adapters may override."""
    return 'LEFT JOIN'
1245
def RANDOM(self):
    """SQL expression for a random sort key; adapters may override."""
    return 'Random()'
1248
def NOT_NULL(self, default, field_type):
    """Render a NOT NULL clause carrying the represented default value."""
    rendered = self.represent(default, field_type)
    return 'NOT NULL DEFAULT %s' % rendered
1251
def COALESCE(self, first, second):
    """Render COALESCE(first, *second); second is an iterable of fallbacks."""
    parts = [self.expand(first)]
    for fallback in second:
        parts.append(self.expand(fallback))
    return 'COALESCE(%s)' % ','.join(parts)
1255
def COALESCE_ZERO(self, first):
    """Render COALESCE(expr, 0) — treat NULL as zero."""
    return 'COALESCE({0},0)'.format(self.expand(first))
1258
def RAW(self, first):
    """Pass a raw SQL snippet through without expansion."""
    return first
1261
def ALLOW_NULL(self):
    """Suffix for nullable columns; empty by default (NULL is implicit)."""
    return ''
1264
def SUBSTRING(self, field, parameters):
    """Render SUBSTR(field, start, length); parameters is (start, length)."""
    start, length = parameters[0], parameters[1]
    return 'SUBSTR(%s,%s,%s)' % (self.expand(field), start, length)
1267
def PRIMARY_KEY(self, key):
    """Render a PRIMARY KEY(...) table constraint for the given column list."""
    return 'PRIMARY KEY({0})'.format(key)
1270
1271 - def _drop(self, table, mode):
1272 table_rname = table._rname or table 1273 return ['DROP TABLE %s;' % table_rname]
1274
def drop(self, table, mode=''):
    """Drop `table` on the backend and purge it from the DAL instance."""
    db = table._db
    for statement in self._drop(table, mode):
        if table._dbt:
            self.log(statement + '\n', table)
        self.execute(statement)
    db.commit()
    # forget the table: mapping entry, name list, inbound references
    del db[table._tablename]
    del db.tables[db.tables.index(table._tablename)]
    db._remove_references_to(table)
    if table._dbt:
        self.file_delete(table._dbt)
        self.log('success!\n', table)
1289
1290 - def _insert(self, table, fields):
1291 table_rname = table._rname or table 1292 if fields: 1293 keys = ','.join(f._rname or f.name for f, v in fields) 1294 values = ','.join(self.expand(v, f.type) for f, v in fields) 1295 return 'INSERT INTO %s(%s) VALUES (%s);' % (table_rname, keys, values) 1296 else: 1297 return self._insert_empty(table)
1298
1299 - def _insert_empty(self, table):
1300 table_rname = table._rname or table 1301 return 'INSERT INTO %s DEFAULT VALUES;' % table_rname
1302
def insert(self, table, fields):
    """Execute an INSERT and return a Reference wrapping the new row id.

    For keyed tables (those with a _primarykey) a dict of the inserted
    primary-key values is returned instead.  On failure, delegates to
    table._on_insert_error when defined, otherwise re-raises.
    """
    query = self._insert(table,fields)
    try:
        self.execute(query)
    except Exception:
        e = sys.exc_info()[1]
        if hasattr(table,'_on_insert_error'):
            return table._on_insert_error(table,fields,e)
        raise e
    if hasattr(table,'_primarykey'):
        # keyed table: there is no auto-generated rowid to report
        return dict([(k[0].name, k[1]) for k in fields \
                     if k[0].name in table._primarykey])
    id = self.lastrowid(table)
    if not isinstance(id, (int, long)):
        # backend could not report a numeric rowid; return it as-is
        return id
    # lazy Reference: record is fetched only when attributes are accessed
    rid = Reference(id)
    (rid._table, rid._record) = (table, None)
    return rid
1321
def bulk_insert(self, table, items):
    """Insert each item in turn; adapters with native bulk support override."""
    return [self.insert(table, item) for item in items]
1324
def NOT(self, first):
    """Render logical negation of the expanded operand."""
    return '(NOT {0})'.format(self.expand(first))
1327
def AND(self, first, second):
    """Render logical conjunction of the two expanded operands."""
    lhs = self.expand(first)
    rhs = self.expand(second)
    return '({0} AND {1})'.format(lhs, rhs)
1330
def OR(self, first, second):
    """Render logical disjunction of the two expanded operands."""
    lhs = self.expand(first)
    rhs = self.expand(second)
    return '({0} OR {1})'.format(lhs, rhs)
1333
def BELONGS(self, first, second):
    """SQL IN operator; `second` is a nested-select string or an iterable."""
    if isinstance(second, str):
        # already-rendered nested select: strip the trailing ';'
        return '(%s IN (%s))' % (self.expand(first), second[:-1])
    if not second:
        # membership in the empty set can never hold
        return '(1=0)'
    rendered = [self.expand(item, first.type) for item in second]
    return '(%s IN (%s))' % (self.expand(first), ','.join(rendered))
1341
def REGEXP(self, first, second):
    """Regular-expression match operator; syntax is backend-specific,
    so the base adapter leaves it unimplemented."""
    raise NotImplementedError
1345
def LIKE(self, first, second):
    """Case-sensitive LIKE operator; backend-specific, so unimplemented
    in the base adapter."""
    raise NotImplementedError
1349
def ILIKE(self, first, second):
    """Case-insensitive like operator (plain LIKE by default)."""
    lhs = self.expand(first)
    rhs = self.expand(second, 'string')
    return '({0} LIKE {1})'.format(lhs, rhs)
1354
def STARTSWITH(self, first, second):
    """Prefix match rendered as LIKE 'second%'."""
    pattern = self.expand(second + '%', 'string')
    return '({0} LIKE {1})'.format(self.expand(first), pattern)
1358
def ENDSWITH(self, first, second):
    """Suffix match rendered as LIKE '%second'."""
    pattern = self.expand('%' + second, 'string')
    return '({0} LIKE {1})'.format(self.expand(first), pattern)
1362
def CONTAINS(self,first,second,case_sensitive=False):
    """LIKE-based containment test.

    For string/text/json fields, matches `second` as a substring; for
    list:<type> fields, matches a whole bar-delimited element.  LIKE
    wildcards in `second` are escaped by doubling '%' (and '|' for
    list fields).  Expression operands are wrapped in CONCAT/REPLACE
    so the escaping happens inside SQL rather than in Python.
    """
    if first.type in ('string','text', 'json'):
        if isinstance(second,Expression):
            second = Expression(None,self.CONCAT('%',Expression(
                        None,self.REPLACE(second,('%','%%'))),'%'))
        else:
            second = '%'+str(second).replace('%','%%')+'%'
    elif first.type.startswith('list:'):
        if isinstance(second,Expression):
            second = Expression(None,self.CONCAT(
                    '%|',Expression(None,self.REPLACE(
                            Expression(None,self.REPLACE(
                                    second,('%','%%'))),('|','||'))),'|%'))
        else:
            second = '%|'+str(second).replace('%','%%')\
                .replace('|','||')+'|%'
    # pick the (in)sensitive LIKE variant
    op = case_sensitive and self.LIKE or self.ILIKE
    return op(first,second)
1381
def EQ(self, first, second=None):
    """Equality test; comparing against None renders IS NULL."""
    lhs = self.expand(first)
    if second is None:
        return '(%s IS NULL)' % lhs
    return '(%s = %s)' % (lhs, self.expand(second, first.type))
1387
def NE(self, first, second=None):
    """Inequality test; comparing against None renders IS NOT NULL."""
    lhs = self.expand(first)
    if second is None:
        return '(%s IS NOT NULL)' % lhs
    return '(%s <> %s)' % (lhs, self.expand(second, first.type))
1393
def LT(self, first, second=None):
    """Less-than comparison; None is not comparable."""
    if second is None:
        raise RuntimeError("Cannot compare %s < None" % first)
    lhs = self.expand(first)
    return '(%s < %s)' % (lhs, self.expand(second, first.type))
1399
def LE(self, first, second=None):
    """Less-or-equal comparison; None is not comparable."""
    if second is None:
        raise RuntimeError("Cannot compare %s <= None" % first)
    lhs = self.expand(first)
    return '(%s <= %s)' % (lhs, self.expand(second, first.type))
1405
def GT(self, first, second=None):
    """Greater-than comparison; None is not comparable."""
    if second is None:
        raise RuntimeError("Cannot compare %s > None" % first)
    lhs = self.expand(first)
    return '(%s > %s)' % (lhs, self.expand(second, first.type))
1411
def GE(self, first, second=None):
    """Greater-or-equal comparison; None is not comparable."""
    if second is None:
        raise RuntimeError("Cannot compare %s >= None" % first)
    lhs = self.expand(first)
    return '(%s >= %s)' % (lhs, self.expand(second, first.type))
1417
def is_numerical_type(self, ftype):
    """True for field types that support arithmetic in SQL."""
    if ftype in ('integer', 'boolean', 'double', 'bigint'):
        return True
    return ftype.startswith('decimal')
1421
def REPLACE(self, first, args):
    """Render SQL REPLACE(haystack, needle, replacement).

    `args` is the (needle, replacement) pair.  The previous signature
    used Python 2 tuple-parameter unpacking (``(second, third)``),
    which PEP 3113 removed from the language; callers always pass the
    pair positionally, so unpacking inside the body is call-compatible
    on both Python 2 and 3.
    """
    (second, third) = args
    return 'REPLACE(%s,%s,%s)' % (self.expand(first, 'string'),
                                  self.expand(second, 'string'),
                                  self.expand(third, 'string'))
1426
def CONCAT(self, *items):
    """Render string concatenation with the SQL || operator."""
    rendered = [self.expand(item, 'string') for item in items]
    return '(%s)' % ' || '.join(rendered)
1429
def ADD(self, first, second):
    """Numeric addition; falls back to string concatenation for
    non-numeric field types."""
    if not self.is_numerical_type(first.type):
        return self.CONCAT(first, second)
    lhs = self.expand(first)
    return '(%s + %s)' % (lhs, self.expand(second, first.type))
1436
def SUB(self, first, second):
    """Render SQL subtraction of the two operands."""
    lhs = self.expand(first)
    return '(%s - %s)' % (lhs, self.expand(second, first.type))
1440
def MUL(self, first, second):
    """Render SQL multiplication of the two operands."""
    lhs = self.expand(first)
    return '(%s * %s)' % (lhs, self.expand(second, first.type))
1444
def DIV(self, first, second):
    """Render SQL division of the two operands."""
    lhs = self.expand(first)
    return '(%s / %s)' % (lhs, self.expand(second, first.type))
1448
def MOD(self, first, second):
    """Render SQL modulo of the two operands (literal % in output)."""
    lhs = self.expand(first)
    return '(%s %% %s)' % (lhs, self.expand(second, first.type))
1452
def AS(self, first, second):
    """Render a column/table alias: 'expr AS name'."""
    return '{0} AS {1}'.format(self.expand(first), second)
1455
def ON(self, first, second):
    """Render the 'table ON condition' fragment of a JOIN clause.

    `first` is a Table.  When first._ot is set the Table object itself
    is expanded — NOTE(review): presumably so the alias rendering kicks
    in; confirm against Table.__str__ — otherwise its rname or plain
    name is used.  Common filters are folded into the join condition.
    """
    table_rname = first._ot and first or first._rname or first._tablename
    if use_common_filters(second):
        second = self.common_filter(second,[first._tablename])
    return '%s ON %s' % (self.expand(table_rname), self.expand(second))
1461
def INVERT(self, first):
    """Render a descending sort key: 'expr DESC'."""
    return '{0} DESC'.format(self.expand(first))
1464
def COMMA(self, first, second):
    """Join two expanded expressions with a comma (multi-column lists)."""
    lhs = self.expand(first)
    rhs = self.expand(second)
    return '{0}, {1}'.format(lhs, rhs)
1467
def CAST(self, first, second):
    """Render CAST(expr AS sqltype); operands are used verbatim."""
    return 'CAST({0} AS {1})'.format(first, second)
1470
def expand(self, expression, field_type=None, colnames=False):
    """Recursively render a Field/Expression/Query (or literal) as SQL.

    field_type, when given, drives literal representation of bare
    values; colnames=True renders Fields with their logical (not rname)
    table/column names, for use as result-column labels.
    """
    if isinstance(expression, Field):
        et = expression.table
        if not colnames:
            # aliased tables (_ot set) keep the alias name;
            # otherwise prefer the backend rname
            table_rname = et._ot and et._tablename or et._rname or et._tablename
        else:
            table_rname = et._tablename
        if not colnames:
            out = '%s.%s' % (table_rname, expression._rname or expression.name)
        else:
            out = '%s.%s' % (table_rname, expression.name)
        if field_type == 'string' and not expression.type in (
            'string','text','json','password'):
            # non-text field used in a string context: cast it
            out = self.CAST(out, self.types['text'])
        return out
    elif isinstance(expression, (Expression, Query)):
        first = expression.first
        second = expression.second
        op = expression.op
        optional_args = expression.optional_args or {}
        if not second is None:
            out = op(first, second, **optional_args)
        elif not first is None:
            out = op(first,**optional_args)
        elif isinstance(op, str):
            # raw SQL operator string; drop a trailing ';'
            if op.endswith(';'):
                op=op[:-1]
            out = '(%s)' % op
        else:
            out = op()
        return out
    elif field_type:
        # bare literal with a known field type
        return str(self.represent(expression,field_type))
    elif isinstance(expression,(list,tuple)):
        return ','.join(self.represent(item,field_type) \
                        for item in expression)
    elif isinstance(expression, bool):
        return '1' if expression else '0'
    else:
        return str(expression)
1511
def table_alias(self, name):
    """Return the SQL name for a table given as a Table or a name string."""
    if isinstance(name, Table):
        return str(name)
    table = self.db[name]
    return str(table._rname or table)
1516
def alias(self, table, alias):
    """
    Given a table object, makes a new table object
    with alias name.

    The clone keeps the original table/rname in _ot so SQL rendering
    can emit 'original AS alias'; every field is shallow-copied and
    re-pointed at the clone.  The alias is registered on table._db.
    """
    other = copy.copy(table)
    other['_ot'] = other._ot or other._rname or other._tablename
    other['ALL'] = SQLALL(other)
    other['_tablename'] = alias
    for fieldname in other.fields:
        # fields must be copied too, or they would still reference
        # the original table
        other[fieldname] = copy.copy(other[fieldname])
        other[fieldname]._tablename = alias
        other[fieldname].tablename = alias
        other[fieldname].table = other
    table._db[alias] = other
    return other
1533
1534 - def _truncate(self, table, mode=''):
1535 tablename = table._rname or table._tablename 1536 return ['TRUNCATE TABLE %s %s;' % (tablename, mode or '')]
1537
def truncate(self, table, mode=' '):
    """Truncate `table`, logging each statement to the migration log.

    Fix: the body was wrapped in a no-op ``try/finally: pass`` (a
    leftover of the removed write_to_logfile/close_logfile handling);
    the dead construct and its stale comment are removed — behavior
    is unchanged.
    """
    queries = table._db._adapter._truncate(table, mode)
    for query in queries:
        self.log(query + '\n', table)
        self.execute(query)
    self.log('success!\n', table)
1548
1549 - def _update(self, tablename, query, fields):
1550 if query: 1551 if use_common_filters(query): 1552 query = self.common_filter(query, [tablename]) 1553 sql_w = ' WHERE ' + self.expand(query) 1554 else: 1555 sql_w = '' 1556 sql_v = ','.join(['%s=%s' % (field._rname or field.name, 1557 self.expand(value, field.type)) \ 1558 for (field, value) in fields]) 1559 tablename = "%s" % (self.db[tablename]._rname or tablename) 1560 return 'UPDATE %s SET %s%s;' % (tablename, sql_v, sql_w)
1561
def update(self, tablename, query, fields):
    """Execute an UPDATE and return the affected-row count (or None)."""
    sql = self._update(tablename, query, fields)
    try:
        self.execute(sql)
    except Exception:
        # delegate to the table's error hook when defined
        e = sys.exc_info()[1]
        table = self.db[tablename]
        if hasattr(table, '_on_update_error'):
            return table._on_update_error(table, query, fields, e)
        raise e
    try:
        return self.cursor.rowcount
    except:
        # some drivers do not expose rowcount
        return None
1576
1577 - def _delete(self, tablename, query):
1578 if query: 1579 if use_common_filters(query): 1580 query = self.common_filter(query, [tablename]) 1581 sql_w = ' WHERE ' + self.expand(query) 1582 else: 1583 sql_w = '' 1584 tablename = '%s' % (self.db[tablename]._rname or tablename) 1585 return 'DELETE FROM %s%s;' % (tablename, sql_w)
1586
def delete(self, tablename, query):
    """Execute a DELETE and return the affected-row count (or None).

    On SQLite/SpatiaLite, which do not enforce ON DELETE CASCADE here,
    the ids to be deleted are captured first and cascading deletes on
    referencing tables are replayed manually afterwards.
    """
    sql = self._delete(tablename, query)
    ### special code to handle CASCADE in SQLite & SpatiaLite
    db = self.db
    table = db[tablename]
    if self.dbengine in ('sqlite', 'spatialite') and table._referenced_by:
        # snapshot the ids before they disappear
        deleted = [x[table._id.name] for x in db(query).select(table._id)]
    ### end special code to handle CASCADE in SQLite & SpatiaLite
    self.execute(sql)
    try:
        counter = self.cursor.rowcount
    except:
        counter = None
    ### special code to handle CASCADE in SQLite & SpatiaLite
    if self.dbengine in ('sqlite', 'spatialite') and counter:
        for field in table._referenced_by:
            if field.type=='reference '+table._tablename \
                    and field.ondelete=='CASCADE':
                db(field.belongs(deleted)).delete()
    ### end special code to handle CASCADE in SQLite & SpatiaLite
    return counter
1608
def get_table(self, query):
    """Return the single table name referenced by `query`, or raise."""
    tablenames = self.tables(query)
    if len(tablenames) == 1:
        return tablenames[0]
    if not tablenames:
        raise RuntimeError("No table selected")
    raise RuntimeError("Too many tables selected")
1617
def expand_all(self, fields, tablenames):
    """Normalize a select field list: expand SQLALL, resolve
    'table.field' strings, and wrap other strings as raw Expressions.
    Defaults to all fields of `tablenames` when the list is empty.
    """
    db = self.db
    new_fields = []
    append = new_fields.append
    for item in fields:
        if isinstance(item,SQLALL):
            # table.ALL: iterate all the table's fields
            new_fields += item._table
        elif isinstance(item,str):
            if REGEX_TABLE_DOT_FIELD.match(item):
                tablename,fieldname = item.split('.')
                append(db[tablename][fieldname])
            else:
                # raw SQL string; default-bind item so each lambda
                # keeps its own value
                append(Expression(db,lambda item=item:item))
        else:
            append(item)
    # ## if no fields specified take them all from the requested tables
    if not new_fields:
        for table in tablenames:
            for field in db[table]:
                append(field)
    return new_fields
1639
def _select(self, query, fields, attributes):
    """Render a full SELECT statement for `query`/`fields`.

    Handles inner joins, left joins and their combination, DISTINCT,
    GROUP BY/HAVING, ORDER BY (including the '<random>' marker),
    LIMIT/OFFSET via select_limitby, and FOR UPDATE.
    """
    tables = self.tables
    for key in set(attributes.keys())-SELECT_ARGS:
        raise SyntaxError('invalid select attribute: %s' % key)
    args_get = attributes.get
    tablenames = tables(query)
    tablenames_for_common_filters = tablenames
    for field in fields:
        # 'table.field' strings are resolved to Field objects
        if isinstance(field, basestring) \
                and REGEX_TABLE_DOT_FIELD.match(field):
            tn,fn = field.split('.')
            field = self.db[tn][fn]
        for tablename in tables(field):
            if not tablename in tablenames:
                tablenames.append(tablename)

    if len(tablenames) < 1:
        raise SyntaxError('Set: no tables selected')
    def colexpand(field):
        return self.expand(field, colnames=True)
    # logical column labels used later by the row parser
    self._colnames = map(colexpand, fields)
    def geoexpand(field):
        # geometry columns are selected as WKT via st_astext()
        if isinstance(field.type,str) and field.type.startswith('geometry') and isinstance(field, Field):
            field = field.st_astext()
        return self.expand(field)
    sql_f = ', '.join(map(geoexpand, fields))
    sql_o = ''
    sql_s = ''
    left = args_get('left', False)
    inner_join = args_get('join', False)
    distinct = args_get('distinct', False)
    groupby = args_get('groupby', False)
    orderby = args_get('orderby', False)
    having = args_get('having', False)
    limitby = args_get('limitby', False)
    orderby_on_limitby = args_get('orderby_on_limitby', True)
    for_update = args_get('for_update', False)
    if self.can_select_for_update is False and for_update is True:
        raise SyntaxError('invalid select attribute: for_update')
    if distinct is True:
        sql_s += 'DISTINCT'
    elif distinct:
        sql_s += 'DISTINCT ON (%s)' % distinct
    if inner_join:
        icommand = self.JOIN()
        if not isinstance(inner_join, (tuple, list)):
            inner_join = [inner_join]
        # split join targets: plain Tables vs ON-expressions
        ijoint = [t._tablename for t in inner_join
                  if not isinstance(t,Expression)]
        ijoinon = [t for t in inner_join if isinstance(t, Expression)]
        itables_to_merge={} #issue 490
        [itables_to_merge.update(
                dict.fromkeys(tables(t))) for t in ijoinon]
        ijoinont = [t.first._tablename for t in ijoinon]
        [itables_to_merge.pop(t) for t in ijoinont
         if t in itables_to_merge] #issue 490
        iimportant_tablenames = ijoint + ijoinont + itables_to_merge.keys()
        iexcluded = [t for t in tablenames
                     if not t in iimportant_tablenames]
    if left:
        join = attributes['left']
        command = self.LEFT_JOIN()
        if not isinstance(join, (tuple, list)):
            join = [join]
        joint = [t._tablename for t in join
                 if not isinstance(t, Expression)]
        joinon = [t for t in join if isinstance(t, Expression)]
        #patch join+left patch (solves problem with ordering in left joins)
        tables_to_merge={}
        [tables_to_merge.update(
                dict.fromkeys(tables(t))) for t in joinon]
        joinont = [t.first._tablename for t in joinon]
        [tables_to_merge.pop(t) for t in joinont if t in tables_to_merge]
        # left-joined tables must not get common filters in WHERE
        tablenames_for_common_filters = [t for t in tablenames
                                         if not t in joinont ]
        important_tablenames = joint + joinont + tables_to_merge.keys()
        excluded = [t for t in tablenames
                    if not t in important_tablenames ]
    else:
        excluded = tablenames

    if use_common_filters(query):
        query = self.common_filter(query,tablenames_for_common_filters)
    sql_w = ' WHERE ' + self.expand(query) if query else ''

    if inner_join and not left:
        sql_t = ', '.join([self.table_alias(t) for t in iexcluded + \
                               itables_to_merge.keys()])
        for t in ijoinon:
            sql_t += ' %s %s' % (icommand, t)
    elif not inner_join and left:
        sql_t = ', '.join([self.table_alias(t) for t in excluded + \
                               tables_to_merge.keys()])
        if joint:
            sql_t += ' %s %s' % (command,
                                 ','.join([self.table_alias(t) for t in joint]))
        for t in joinon:
            sql_t += ' %s %s' % (command, t)
    elif inner_join and left:
        # combined case: everything not already brought in via an
        # ON-expression goes into the plain FROM list
        all_tables_in_query = set(important_tablenames + \
                                  iimportant_tablenames + \
                                  tablenames)
        tables_in_joinon = set(joinont + ijoinont)
        tables_not_in_joinon = \
            all_tables_in_query.difference(tables_in_joinon)
        sql_t = ','.join([self.table_alias(t) for t in tables_not_in_joinon])
        for t in ijoinon:
            sql_t += ' %s %s' % (icommand, t)
        if joint:
            sql_t += ' %s %s' % (command,
                                 ','.join([self.table_alias(t) for t in joint]))
        for t in joinon:
            sql_t += ' %s %s' % (command, t)
    else:
        sql_t = ', '.join(self.table_alias(t) for t in tablenames)
    if groupby:
        if isinstance(groupby, (list, tuple)):
            groupby = xorify(groupby)
        sql_o += ' GROUP BY %s' % self.expand(groupby)
        if having:
            sql_o += ' HAVING %s' % attributes['having']
    if orderby:
        if isinstance(orderby, (list, tuple)):
            orderby = xorify(orderby)
        if str(orderby) == '<random>':
            sql_o += ' ORDER BY %s' % self.RANDOM()
        else:
            sql_o += ' ORDER BY %s' % self.expand(orderby)
    if (limitby and not groupby and tablenames and orderby_on_limitby and not orderby):
        # LIMIT without ORDER BY is non-deterministic: default to
        # ordering by primary key(s)
        sql_o += ' ORDER BY %s' % ', '.join(
            ['%s.%s'%(t,x) for t in tablenames for x in (
                    hasattr(self.db[t],'_primarykey') and self.db[t]._primarykey
                    or [self.db[t]._id._rname or self.db[t]._id.name]
                    )
             ]
            )
    # oracle does not support limitby
    sql = self.select_limitby(sql_s, sql_f, sql_t, sql_w, sql_o, limitby)
    if for_update and self.can_select_for_update is True:
        sql = sql.rstrip(';') + ' FOR UPDATE;'
    return sql
def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
    """Assemble the final SELECT, appending LIMIT/OFFSET when given."""
    clause = sql_o
    if limitby:
        lmin, lmax = limitby
        clause += ' LIMIT %i OFFSET %i' % (lmax - lmin, lmin)
    return 'SELECT {0} {1} FROM {2}{3}{4};'.format(
        sql_s, sql_f, sql_t, sql_w, clause)
1788
1789 - def _fetchall(self):
1790 return self.cursor.fetchall()
1791
def _select_aux(self,sql,fields,attributes):
    """Execute (or fetch from cache) a rendered SELECT and parse rows.

    With a ('cache', (model, expire)) attribute, raw rows are cached
    under a key derived from uri+sql; long keys are md5-hashed.
    """
    args_get = attributes.get
    cache = args_get('cache',None)
    if not cache:
        self.execute(sql)
        rows = self._fetchall()
    else:
        (cache_model, time_expire) = cache
        key = self.uri + '/' + sql + '/rows'
        if len(key)>200: key = hashlib_md5(key).hexdigest()
        def _select_aux2():
            self.execute(sql)
            return self._fetchall()
        rows = cache_model(key,_select_aux2,time_expire)
    if isinstance(rows,tuple):
        rows = list(rows)
    # adapters without native LIMIT slice client-side via rowslice
    limitby = args_get('limitby', None) or (0,)
    rows = self.rowslice(rows,limitby[0],None)
    processor = args_get('processor',self.parse)
    cacheable = args_get('cacheable',False)
    return processor(rows,fields,self._colnames,cacheable=cacheable)
def select(self, query, fields, attributes):
    """
    Always returns a Rows object, possibly empty.

    When both 'cache' and 'cacheable' attributes are set, the whole
    parsed Rows object is cached (keyed on uri+sql, md5-hashed when
    long); otherwise caching, if any, happens on raw rows inside
    _select_aux.
    """
    sql = self._select(query, fields, attributes)
    cache = attributes.get('cache', None)
    if cache and attributes.get('cacheable',False):
        del attributes['cache']
        (cache_model, time_expire) = cache
        key = self.uri + '/' + sql
        if len(key)>200: key = hashlib_md5(key).hexdigest()
        args = (sql,fields,attributes)
        return cache_model(
            key,
            lambda self=self,args=args:self._select_aux(*args),
            time_expire)
    else:
        return self._select_aux(sql,fields,attributes)
1832
1833 - def _count(self, query, distinct=None):
1834 tablenames = self.tables(query) 1835 if query: 1836 if use_common_filters(query): 1837 query = self.common_filter(query, tablenames) 1838 sql_w = ' WHERE ' + self.expand(query) 1839 else: 1840 sql_w = '' 1841 sql_t = ','.join(self.table_alias(t) for t in tablenames) 1842 if distinct: 1843 if isinstance(distinct,(list, tuple)): 1844 distinct = xorify(distinct) 1845 sql_d = self.expand(distinct) 1846 return 'SELECT count(DISTINCT %s) FROM %s%s;' % \ 1847 (sql_d, sql_t, sql_w) 1848 return 'SELECT count(*) FROM %s%s;' % (sql_t, sql_w)
1849
def count(self, query, distinct=None):
    """Execute the count query and return the scalar result."""
    sql = self._count(query, distinct)
    self.execute(sql)
    return self.cursor.fetchone()[0]
1853
def tables(self, *queries):
    """Collect the distinct table names referenced by the given queries."""
    found = set()
    for query in queries:
        if isinstance(query, Field):
            found.add(query.tablename)
        elif isinstance(query, (Expression, Query)):
            # recurse into both operands of the expression tree
            if query.first is not None:
                found = found.union(self.tables(query.first))
            if query.second is not None:
                found = found.union(self.tables(query.second))
    return list(found)
1865
def commit(self):
    """Commit the current transaction when a connection exists."""
    connection = self.connection
    if connection:
        return connection.commit()
1869
def rollback(self):
    """Roll back the current transaction when a connection exists."""
    connection = self.connection
    if connection:
        return connection.rollback()
1873
def close_connection(self):
    """Close and forget the underlying DB-API connection, if any."""
    if not self.connection:
        return None
    result = self.connection.close()
    self.connection = None
    return result
1879
def distributed_transaction_begin(self, key):
    """No-op by default; adapters with two-phase commit override this."""
    return None
1882
def prepare(self, key):
    """Phase one of a two-phase commit, when a connection exists."""
    connection = self.connection
    if connection:
        connection.prepare()
1885
def commit_prepared(self, key):
    """Commit a prepared two-phase transaction, when connected."""
    connection = self.connection
    if connection:
        connection.commit()
1888
def rollback_prepared(self, key):
    """Roll back a prepared two-phase transaction, when connected."""
    connection = self.connection
    if connection:
        connection.rollback()
1891
def concat_add(self, tablename):
    """Separator used to chain multiple ADD clauses in one ALTER TABLE."""
    return ', ADD '
1894
def constraint_name(self, table, fieldname):
    """Build the default name for a foreign-key constraint."""
    return '{0}_{1}__constraint'.format(table, fieldname)
1897
def create_sequence_and_triggers(self, query, table, **args):
    """Run the CREATE TABLE; adapters needing sequences/triggers override."""
    self.execute(query)
1900
def log_execute(self, *a, **b):
    """Execute a[0] on the cursor, recording SQL text and timing.

    Returns None (without executing) when there is no connection.
    The statement and its duration are appended to db._timings,
    trimmed to the most recent TIMINGSSIZE entries.
    """
    if not self.connection: return None
    command = a[0]
    if hasattr(self,'filter_sql_command'):
        # adapter-specific rewriting hook
        command = self.filter_sql_command(command)
    if self.db._debug:
        LOGGER.debug('SQL: %s' % command)
    self.db._lastsql = command
    t0 = time.time()
    ret = self.cursor.execute(command, *a[1:], **b)
    self.db._timings.append((command,time.time()-t0))
    # keep only the most recent TIMINGSSIZE timings
    del self.db._timings[:-TIMINGSSIZE]
    return ret
1914
def execute(self, *a, **b):
    """Default execute: delegate to log_execute (adapters may override)."""
    return self.log_execute(*a, **b)
1917
def represent(self, obj, fieldtype):
    """Convert a Python value into its SQL literal for `fieldtype`.

    Handles callables, SQLCustomType encoders, list:<type> bar-encoding,
    NULLs, booleans, numeric types, references, dates/times, blobs
    (base64), and json serialization, finally adapting strings through
    self.adapt with db_codec re-encoding as a fallback.
    """
    field_is_type = fieldtype.startswith
    if isinstance(obj, CALLABLETYPES):
        # lazy defaults: call to obtain the actual value
        obj = obj()
    if isinstance(fieldtype, SQLCustomType):
        value = fieldtype.encoder(obj)
        if fieldtype.type in ('string','text', 'json'):
            return self.adapt(value)
        return value
    if isinstance(obj, (Expression, Field)):
        return str(obj)
    if field_is_type('list:'):
        if not obj:
            obj = []
        elif not isinstance(obj, (list, tuple)):
            obj = [obj]
        if field_is_type('list:string'):
            obj = map(str,obj)
        else:
            obj = map(int,[o for o in obj if o != ''])
    # we don't want to bar_encode json objects
    if isinstance(obj, (list, tuple)) and (not fieldtype == "json"):
        obj = bar_encode(obj)
    if obj is None:
        return 'NULL'
    # empty string is NULL except for text-like field types
    if obj == '' and not fieldtype[:2] in ['st', 'te', 'js', 'pa', 'up']:
        return 'NULL'
    r = self.represent_exceptions(obj, fieldtype)
    if not r is None:
        return r
    if fieldtype == 'boolean':
        # '0'/'F' prefixes count as false
        if obj and not str(obj)[:1].upper() in '0F':
            return self.smart_adapt(self.TRUE)
        else:
            return self.smart_adapt(self.FALSE)
    if fieldtype == 'id' or fieldtype == 'integer':
        return str(long(obj))
    if field_is_type('decimal'):
        return str(obj)
    elif field_is_type('reference'): # reference
        if fieldtype.find('.')>0:
            return repr(obj)
        elif isinstance(obj, (Row, Reference)):
            return str(obj['id'])
        return str(long(obj))
    elif fieldtype == 'double':
        return repr(float(obj))
    if isinstance(obj, unicode):
        obj = obj.encode(self.db_codec)
    if fieldtype == 'blob':
        obj = base64.b64encode(str(obj))
    elif fieldtype == 'date':
        if isinstance(obj, (datetime.date, datetime.datetime)):
            obj = obj.isoformat()[:10]
        else:
            obj = str(obj)
    elif fieldtype == 'datetime':
        if isinstance(obj, datetime.datetime):
            obj = obj.isoformat(self.T_SEP)[:19]
        elif isinstance(obj, datetime.date):
            # date into a datetime column: midnight
            obj = obj.isoformat()[:10]+self.T_SEP+'00:00:00'
        else:
            obj = str(obj)
    elif fieldtype == 'time':
        # NOTE(review): [:10] keeps 'HH:MM:SS.f' — presumably intended
        # to drop sub-decisecond precision; confirm
        if isinstance(obj, datetime.time):
            obj = obj.isoformat()[:10]
        else:
            obj = str(obj)
    elif fieldtype == 'json':
        if not self.native_json:
            if have_serializers:
                obj = serializers.json(obj)
            elif simplejson:
                obj = simplejson.dumps(obj)
            else:
                raise RuntimeError("missing simplejson")
    if not isinstance(obj,bytes):
        obj = bytes(obj)
    try:
        obj.decode(self.db_codec)
    except:
        # not valid in db_codec: reinterpret as latin1 and re-encode
        obj = obj.decode('latin1').encode(self.db_codec)
    return self.adapt(obj)
2001
    def represent_exceptions(self, obj, fieldtype):
        # Hook for backend subclasses: return a SQL literal string for
        # backend-specific corner cases, or None to fall through to the
        # generic logic in represent().
        return None
2004
    def lastrowid(self, table):
        # No generic way to fetch the id of the last inserted row; backend
        # subclasses override this (e.g. SQLite uses cursor.lastrowid,
        # MySQL runs last_insert_id()).
        return None
2007
    def rowslice(self, rows, minimum=0, maximum=None):
        """
        By default this function does nothing;
        overload when db does not do slicing
        (i.e. the backend has no native LIMIT/OFFSET support).
        """
        return rows
2014
2015 - def parse_value(self, value, field_type, blob_decode=True):
2016 if field_type != 'blob' and isinstance(value, str): 2017 try: 2018 value = value.decode(self.db._db_codec) 2019 except Exception: 2020 pass 2021 if isinstance(value, unicode): 2022 value = value.encode('utf-8') 2023 if isinstance(field_type, SQLCustomType): 2024 value = field_type.decoder(value) 2025 if not isinstance(field_type, str) or value is None: 2026 return value 2027 elif field_type in ('string', 'text', 'password', 'upload', 'dict'): 2028 return value 2029 elif field_type.startswith('geo'): 2030 return value 2031 elif field_type == 'blob' and not blob_decode: 2032 return value 2033 else: 2034 key = REGEX_TYPE.match(field_type).group(0) 2035 return self.parsemap[key](value,field_type)
2036
2037 - def parse_reference(self, value, field_type):
2038 referee = field_type[10:].strip() 2039 if not '.' in referee: 2040 value = Reference(value) 2041 value._table, value._record = self.db[referee], None 2042 return value
2043
2044 - def parse_boolean(self, value, field_type):
2045 return value == self.TRUE or str(value)[:1].lower() == 't'
2046
2047 - def parse_date(self, value, field_type):
2048 if isinstance(value, datetime.datetime): 2049 return value.date() 2050 if not isinstance(value, (datetime.date,datetime.datetime)): 2051 (y, m, d) = map(int, str(value)[:10].strip().split('-')) 2052 value = datetime.date(y, m, d) 2053 return value
2054
2055 - def parse_time(self, value, field_type):
2056 if not isinstance(value, datetime.time): 2057 time_items = map(int,str(value)[:8].strip().split(':')[:3]) 2058 if len(time_items) == 3: 2059 (h, mi, s) = time_items 2060 else: 2061 (h, mi, s) = time_items + [0] 2062 value = datetime.time(h, mi, s) 2063 return value
2064
2065 - def parse_datetime(self, value, field_type):
2066 if not isinstance(value, datetime.datetime): 2067 value = str(value) 2068 date_part,time_part,timezone = value[:10],value[11:19],value[19:] 2069 if '+' in timezone: 2070 ms,tz = timezone.split('+') 2071 h,m = tz.split(':') 2072 dt = datetime.timedelta(seconds=3600*int(h)+60*int(m)) 2073 elif '-' in timezone: 2074 ms,tz = timezone.split('-') 2075 h,m = tz.split(':') 2076 dt = -datetime.timedelta(seconds=3600*int(h)+60*int(m)) 2077 else: 2078 dt = None 2079 (y, m, d) = map(int,date_part.split('-')) 2080 time_parts = time_part and time_part.split(':')[:3] or (0,0,0) 2081 while len(time_parts)<3: time_parts.append(0) 2082 time_items = map(int,time_parts) 2083 (h, mi, s) = time_items 2084 value = datetime.datetime(y, m, d, h, mi, s) 2085 if dt: 2086 value = value + dt 2087 return value
2088
2089 - def parse_blob(self, value, field_type):
2090 return base64.b64decode(str(value))
2091
2092 - def parse_decimal(self, value, field_type):
2093 decimals = int(field_type[8:-1].split(',')[-1]) 2094 if self.dbengine in ('sqlite', 'spatialite'): 2095 value = ('%.' + str(decimals) + 'f') % value 2096 if not isinstance(value, decimal.Decimal): 2097 value = decimal.Decimal(str(value)) 2098 return value
2099
2100 - def parse_list_integers(self, value, field_type):
2101 if not isinstance(self, NoSQLAdapter): 2102 value = bar_decode_integer(value) 2103 return value
2104
2105 - def parse_list_references(self, value, field_type):
2106 if not isinstance(self, NoSQLAdapter): 2107 value = bar_decode_integer(value) 2108 return [self.parse_reference(r, field_type[5:]) for r in value]
2109
2110 - def parse_list_strings(self, value, field_type):
2111 if not isinstance(self, NoSQLAdapter): 2112 value = bar_decode_string(value) 2113 return value
2114
    def parse_id(self, value, field_type):
        # 'id' columns are always integers (py2 long to cover wide ids)
        return long(value)
2117
    def parse_integer(self, value, field_type):
        # both 'integer' and 'bigint' field types map here (see build_parsemap)
        return long(value)
2120
    def parse_double(self, value, field_type):
        # both 'float' and 'double' field types map here (see build_parsemap)
        return float(value)
2123
2124 - def parse_json(self, value, field_type):
2125 if not self.native_json: 2126 if not isinstance(value, basestring): 2127 raise RuntimeError('json data not a string') 2128 if isinstance(value, unicode): 2129 value = value.encode('utf-8') 2130 if have_serializers: 2131 value = serializers.loads_json(value) 2132 elif simplejson: 2133 value = simplejson.loads(value) 2134 else: 2135 raise RuntimeError("missing simplejson") 2136 return value
2137
    def build_parsemap(self):
        # Map base field-type names to their parse_* handlers; consumed by
        # parse_value() after extracting the base type with REGEX_TYPE.
        self.parsemap = {
            'id':self.parse_id,
            'integer':self.parse_integer,
            'bigint':self.parse_integer,
            'float':self.parse_double,
            'double':self.parse_double,
            'reference':self.parse_reference,
            'boolean':self.parse_boolean,
            'date':self.parse_date,
            'time':self.parse_time,
            'datetime':self.parse_datetime,
            'blob':self.parse_blob,
            'decimal':self.parse_decimal,
            'json':self.parse_json,
            'list:integer':self.parse_list_integers,
            'list:reference':self.parse_list_references,
            'list:string':self.parse_list_strings,
            }
2157
    def parse(self, rows, fields, colnames, blob_decode=True,
              cacheable = False):
        """Convert raw driver rows into a Rows object.

        Each 'table.field' column is parsed into Row objects grouped by
        table; unrecognized column names (expressions, aliases) go into
        row['_extra']. Unless cacheable, per-record update_record /
        delete_record closures and lazy reference sets are attached.
        Finally, virtual and lazy fields of every touched table are
        evaluated.
        """
        db = self.db
        virtualtables = []
        new_rows = []
        # pre-resolve (tablename, fieldname, table, field, type) per column
        # once, outside the per-row loop
        tmps = []
        for colname in colnames:
            if not REGEX_TABLE_DOT_FIELD.match(colname):
                tmps.append(None)
            else:
                (tablename, _the_sep_, fieldname) = colname.partition('.')
                table = db[tablename]
                field = table[fieldname]
                ft = field.type
                tmps.append((tablename,fieldname,table,field,ft))
        for (i,row) in enumerate(rows):
            new_row = Row()
            for (j,colname) in enumerate(colnames):
                value = row[j]
                tmp = tmps[j]
                if tmp:
                    (tablename,fieldname,table,field,ft) = tmp
                    if tablename in new_row:
                        colset = new_row[tablename]
                    else:
                        colset = new_row[tablename] = Row()
                        if tablename not in virtualtables:
                            virtualtables.append(tablename)
                    value = self.parse_value(value,ft,blob_decode)
                    if field.filter_out:
                        value = field.filter_out(value)
                    colset[fieldname] = value

                    # for backward compatibility
                    if ft=='id' and fieldname!='id' and \
                            not 'id' in table.fields:
                        colset['id'] = value

                    if ft == 'id' and not cacheable:
                        # temporary hack to deal with
                        # GoogleDatastoreAdapter
                        # references
                        if isinstance(self, GoogleDatastoreAdapter):
                            id = value.key.id() if self.use_ndb else value.key().id_or_name()
                            colset[fieldname] = id
                            colset.gae_item = value
                        else:
                            id = value
                        colset.update_record = RecordUpdater(colset,table,id)
                        colset.delete_record = RecordDeleter(table,id)
                        if table._db._lazy_tables:
                            colset['__get_lazy_reference__'] = LazyReferenceGetter(table, id)
                        # expose back-references as lazy sets, e.g.
                        # row.person.dog (name built from db._referee_name)
                        for rfield in table._referenced_by:
                            referee_link = db._referee_name and \
                                db._referee_name % dict(
                                table=rfield.tablename,field=rfield.name)
                            if referee_link and not referee_link in colset:
                                colset[referee_link] = LazySet(rfield,id)
                else:
                    # not a plain table.field column: stash under _extra and,
                    # when aliased with AS, also as a top-level attribute
                    if not '_extra' in new_row:
                        new_row['_extra'] = Row()
                    new_row['_extra'][colname] = \
                        self.parse_value(value,
                                         fields[j].type,blob_decode)
                    new_column_name = \
                        REGEX_SELECT_AS_PARSER.search(colname)
                    if not new_column_name is None:
                        column_name = new_column_name.groups(0)
                        setattr(new_row,column_name[0],value)
            new_rows.append(new_row)
        rowsobj = Rows(db, new_rows, colnames, rawrows=rows)

        # evaluate new-style virtual/lazy fields for every table seen above
        for tablename in virtualtables:
            table = db[tablename]
            fields_virtual = [(f,v) for (f,v) in table.iteritems()
                              if isinstance(v,FieldVirtual)]
            fields_lazy = [(f,v) for (f,v) in table.iteritems()
                           if isinstance(v,FieldMethod)]
            if fields_virtual or fields_lazy:
                for row in rowsobj.records:
                    box = row[tablename]
                    for f,v in fields_virtual:
                        try:
                            box[f] = v.f(row)
                        except AttributeError:
                            pass # not enough fields to define virtual field
                    for f,v in fields_lazy:
                        try:
                            box[f] = (v.handler or VirtualCommand)(v.f,row)
                        except AttributeError:
                            pass # not enough fields to define virtual field

            ### old style virtual fields
            for item in table.virtualfields:
                try:
                    rowsobj = rowsobj.setvirtualfields(**{tablename:item})
                except (KeyError, AttributeError):
                    # to avoid breaking virtualfields when partial select
                    pass
        return rowsobj
2259
2260 - def common_filter(self, query, tablenames):
2261 tenant_fieldname = self.db._request_tenant 2262 2263 for tablename in tablenames: 2264 table = self.db[tablename] 2265 2266 # deal with user provided filters 2267 if table._common_filter != None: 2268 query = query & table._common_filter(query) 2269 2270 # deal with multi_tenant filters 2271 if tenant_fieldname in table: 2272 default = table[tenant_fieldname].default 2273 if not default is None: 2274 newquery = table[tenant_fieldname] == default 2275 if query is None: 2276 query = newquery 2277 else: 2278 query = query & newquery 2279 return query
2280
2281 - def CASE(self,query,t,f):
2282 def represent(x): 2283 types = {type(True):'boolean',type(0):'integer',type(1.0):'double'} 2284 if x is None: return 'NULL' 2285 elif isinstance(x,Expression): return str(x) 2286 else: return self.represent(x,types.get(type(x),'string'))
2287 return Expression(self.db,'CASE WHEN %s THEN %s ELSE %s END' % \ 2288 (self.expand(query),represent(t),represent(f))) 2289
###################################################################################
# List of all the available adapters; they all extend BaseAdapter.
###################################################################################

class SQLiteAdapter(BaseAdapter):
    """DAL adapter for SQLite (sqlite2/sqlite3 drivers)."""

    drivers = ('sqlite2','sqlite3')

    can_select_for_update = None # support ourselves with BEGIN TRANSACTION

    def EXTRACT(self,field,what):
        # delegates to the web2py_extract UDF registered in after_connection
        return "web2py_extract('%s',%s)" % (what, self.expand(field))

    @staticmethod
    def web2py_extract(lookup, s):
        # UDF: pull a date/time component out of an ISO string by slicing;
        # 'epoch' parses the full timestamp instead
        table = {
            'year': (0, 4),
            'month': (5, 7),
            'day': (8, 10),
            'hour': (11, 13),
            'minute': (14, 16),
            'second': (17, 19),
            }
        try:
            if lookup != 'epoch':
                (i, j) = table[lookup]
                return int(s[i:j])
            else:
                return time.mktime(datetime.datetime.strptime(s, '%Y-%m-%d %H:%M:%S').timetuple())
        except:
            # malformed values yield SQL NULL rather than raising in-query
            return None

    @staticmethod
    def web2py_regexp(expression, item):
        # UDF backing the REGEXP operator (sqlite has none built in)
        return re.compile(expression).search(item) is not None

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        self.db = db
        self.dbengine = "sqlite"
        self.uri = uri
        self.adapter_args = adapter_args
        if do_connect: self.find_driver(adapter_args)
        # NOTE(review): pool_size argument is ignored — sqlite connections
        # are not pooled here
        self.pool_size = 0
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        path_encoding = sys.getfilesystemencoding() \
            or locale.getdefaultlocale()[1] or 'utf8'
        if uri.startswith('sqlite:memory'):
            self.dbpath = ':memory:'
        else:
            self.dbpath = uri.split('://',1)[1]
            # relative paths are resolved against the working folder
            if self.dbpath[0] != '/':
                if PYTHON_VERSION[0] == 2:
                    self.dbpath = pjoin(
                        self.folder.decode(path_encoding).encode('utf8'), self.dbpath)
                else:
                    self.dbpath = pjoin(self.folder, self.dbpath)
        if not 'check_same_thread' in driver_args:
            driver_args['check_same_thread'] = False
        if not 'detect_types' in driver_args and do_connect:
            driver_args['detect_types'] = self.driver.PARSE_DECLTYPES
        def connector(dbpath=self.dbpath, driver_args=driver_args):
            return self.driver.Connection(dbpath, **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        # register the UDFs used by EXTRACT() and REGEXP()
        self.connection.create_function('web2py_extract', 2,
                                        SQLiteAdapter.web2py_extract)
        self.connection.create_function("REGEXP", 2,
                                        SQLiteAdapter.web2py_regexp)

        if self.adapter_args.get('foreign_keys',True):
            self.execute('PRAGMA foreign_keys=ON;')

    def _truncate(self, table, mode=''):
        # also reset the AUTOINCREMENT counter kept in sqlite_sequence
        tablename = table._tablename
        return ['DELETE FROM %s;' % tablename,
                "DELETE FROM sqlite_sequence WHERE name='%s';" % tablename]

    def lastrowid(self, table):
        return self.cursor.lastrowid

    def REGEXP(self,first,second):
        return '(%s REGEXP %s)' % (self.expand(first),
                                   self.expand(second,'string'))

    def select(self, query, fields, attributes):
        """
        Simulate SELECT ... FOR UPDATE with BEGIN IMMEDIATE TRANSACTION.
        Note that the entire database, rather than one record, is locked
        (it will be locked eventually anyway by the following UPDATE).
        """
        if attributes.get('for_update', False) and not 'cache' in attributes:
            self.execute('BEGIN IMMEDIATE TRANSACTION;')
        return super(SQLiteAdapter, self).select(query, fields, attributes)
class SpatiaLiteAdapter(SQLiteAdapter):
    """DAL adapter for SpatiaLite: SQLite plus the spatialite loadable
    extension, adding a 'geometry' type and GIS functions."""

    drivers = ('sqlite3','sqlite2')

    types = copy.copy(BaseAdapter.types)
    types.update(geometry='GEOMETRY')

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, srid=4326, after_connection=None):
        self.db = db
        self.dbengine = "spatialite"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        # NOTE(review): pool_size argument is ignored, as in SQLiteAdapter
        self.pool_size = 0
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        self.srid = srid  # default spatial reference system id
        path_encoding = sys.getfilesystemencoding() \
            or locale.getdefaultlocale()[1] or 'utf8'
        if uri.startswith('spatialite:memory'):
            self.dbpath = ':memory:'
        else:
            self.dbpath = uri.split('://',1)[1]
            if self.dbpath[0] != '/':
                self.dbpath = pjoin(
                    self.folder.decode(path_encoding).encode('utf8'), self.dbpath)
        if not 'check_same_thread' in driver_args:
            driver_args['check_same_thread'] = False
        if not 'detect_types' in driver_args and do_connect:
            driver_args['detect_types'] = self.driver.PARSE_DECLTYPES
        def connector(dbpath=self.dbpath, driver_args=driver_args):
            return self.driver.Connection(dbpath, **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        self.connection.enable_load_extension(True)
        # for Windows, rename libspatialite-2.dll to libspatialite.dll
        # Linux uses libspatialite.so
        # Mac OS X uses libspatialite.dylib
        libspatialite = SPATIALLIBS[platform.system()]
        self.execute(r'SELECT load_extension("%s");' % libspatialite)

        self.connection.create_function('web2py_extract', 2,
                                        SQLiteAdapter.web2py_extract)
        self.connection.create_function("REGEXP", 2,
                                        SQLiteAdapter.web2py_regexp)

    # GIS functions

    def ST_ASGEOJSON(self, first, second):
        return 'AsGeoJSON(%s,%s,%s)' %(self.expand(first),
            second['precision'], second['options'])

    def ST_ASTEXT(self, first):
        return 'AsText(%s)' %(self.expand(first))

    def ST_CONTAINS(self, first, second):
        return 'Contains(%s,%s)' %(self.expand(first),
                                   self.expand(second, first.type))

    def ST_DISTANCE(self, first, second):
        return 'Distance(%s,%s)' %(self.expand(first),
                                   self.expand(second, first.type))

    def ST_EQUALS(self, first, second):
        return 'Equals(%s,%s)' %(self.expand(first),
                                 self.expand(second, first.type))

    def ST_INTERSECTS(self, first, second):
        return 'Intersects(%s,%s)' %(self.expand(first),
                                     self.expand(second, first.type))

    def ST_OVERLAPS(self, first, second):
        return 'Overlaps(%s,%s)' %(self.expand(first),
                                   self.expand(second, first.type))

    def ST_SIMPLIFY(self, first, second):
        return 'Simplify(%s,%s)' %(self.expand(first),
                                   self.expand(second, 'double'))

    def ST_TOUCHES(self, first, second):
        return 'Touches(%s,%s)' %(self.expand(first),
                                  self.expand(second, first.type))

    def ST_WITHIN(self, first, second):
        return 'Within(%s,%s)' %(self.expand(first),
                                 self.expand(second, first.type))

    def represent(self, obj, fieldtype):
        # geo* values are rendered as WKT wrapped in ST_GeomFromText;
        # everything else falls back to the generic representation
        field_is_type = fieldtype.startswith
        if field_is_type('geo'):
            srid = 4326 # Spatialite default srid for geometry
            geotype, parms = fieldtype[:-1].split('(')
            parms = parms.split(',')
            if len(parms) >= 2:
                schema, srid = parms[:2]
#             if field_is_type('geometry'):
            value = "ST_GeomFromText('%s',%s)" %(obj, srid)
#             elif field_is_type('geography'):
#                 value = "ST_GeogFromText('SRID=%s;%s')" %(srid, obj)
#             else:
#                 raise SyntaxError, 'Invalid field type %s' %fieldtype
            return value
        return BaseAdapter.represent(self, obj, fieldtype)
class JDBCSQLiteAdapter(SQLiteAdapter):
    """DAL adapter for SQLite accessed through zxJDBC (Jython)."""

    drivers = ('zxJDBC_sqlite',)

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        self.db = db
        self.dbengine = "sqlite"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        path_encoding = sys.getfilesystemencoding() \
            or locale.getdefaultlocale()[1] or 'utf8'
        if uri.startswith('sqlite:memory'):
            self.dbpath = ':memory:'
        else:
            self.dbpath = uri.split('://',1)[1]
            if self.dbpath[0] != '/':
                self.dbpath = pjoin(
                    self.folder.decode(path_encoding).encode('utf8'), self.dbpath)
        def connector(dbpath=self.dbpath,driver_args=driver_args):
            # connect through the JDBC bridge using a jdbc:sqlite: URL
            return self.driver.connect(
                self.driver.getConnection('jdbc:sqlite:'+dbpath),
                **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        # FIXME http://www.zentus.com/sqlitejdbc/custom_functions.html for UDFs
        self.connection.create_function('web2py_extract', 2,
                                        SQLiteAdapter.web2py_extract)

    def execute(self, a):
        return self.log_execute(a)
class MySQLAdapter(BaseAdapter):
    """DAL adapter for MySQL (MySQLdb, pymysql or mysql-connector)."""

    drivers = ('MySQLdb','pymysql', 'mysqlconnector')

    commit_on_alter_table = True
    support_distributed_transaction = True
    # mapping of DAL field types to MySQL column type declarations
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'LONGTEXT',
        'json': 'LONGTEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'LONGBLOB',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'DATETIME',
        'id': 'INT AUTO_INCREMENT NOT NULL',
        'reference': 'INT, INDEX %(index_name)s (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'LONGTEXT',
        'list:string': 'LONGTEXT',
        'list:reference': 'LONGTEXT',
        'big-id': 'BIGINT AUTO_INCREMENT NOT NULL',
        'big-reference': 'BIGINT, INDEX %(index_name)s (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }

    QUOTE_TEMPLATE = "`%s`"

    def varquote(self,name):
        # backtick-quote identifiers only when needed
        return varquote_aux(name,'`%s`')

    def RANDOM(self):
        return 'RAND()'

    def SUBSTRING(self,field,parameters):
        return 'SUBSTRING(%s,%s,%s)' % (self.expand(field),
                                        parameters[0], parameters[1])

    def EPOCH(self, first):
        return "UNIX_TIMESTAMP(%s)" % self.expand(first)

    def CONCAT(self, *items):
        return 'CONCAT(%s)' % ','.join(self.expand(x,'string') for x in items)

    def REGEXP(self,first,second):
        return '(%s REGEXP %s)' % (self.expand(first),
                                   self.expand(second,'string'))

    def _drop(self,table,mode):
        # breaks db integrity but without this mysql does not drop table
        table_rname = table._rname or table
        return ['SET FOREIGN_KEY_CHECKS=0;','DROP TABLE %s;' % table_rname,
                'SET FOREIGN_KEY_CHECKS=1;']

    def _insert_empty(self, table):
        return 'INSERT INTO %s VALUES (DEFAULT);' % table

    # XA two-phase commit support
    # NOTE(review): the XA statements below do not include the transaction
    # id ('key') — confirm against MySQL XA syntax requirements
    def distributed_transaction_begin(self,key):
        self.execute('XA START;')

    def prepare(self,key):
        self.execute("XA END;")
        self.execute("XA PREPARE;")

    def commit_prepared(self,ley):
        # NOTE(review): parameter name 'ley' looks like a typo for 'key'
        self.execute("XA COMMIT;")

    def rollback_prepared(self,key):
        self.execute("XA ROLLBACK;")

    # mysql://user:password@host:port/db?set_encoding=charset
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        self.db = db
        self.dbengine = "mysql"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError(
                "Invalid URI string in DAL: %s" % self.uri)
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        port = int(m.group('port') or '3306')
        charset = m.group('charset') or 'utf8'
        driver_args.update(db=db,
                           user=credential_decoder(user),
                           passwd=credential_decoder(password),
                           host=host,
                           port=port,
                           charset=charset)


        def connector(driver_args=driver_args):
            return self.driver.connect(**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        self.execute('SET FOREIGN_KEY_CHECKS=1;')
        self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")

    def lastrowid(self,table):
        self.execute('select last_insert_id();')
        return int(self.cursor.fetchone()[0])
2667 2668 -class PostgreSQLAdapter(BaseAdapter):
2669 drivers = ('psycopg2','pg8000') 2670 2671 support_distributed_transaction = True 2672 types = { 2673 'boolean': 'CHAR(1)', 2674 'string': 'VARCHAR(%(length)s)', 2675 'text': 'TEXT', 2676 'json': 'TEXT', 2677 'password': 'VARCHAR(%(length)s)', 2678 'blob': 'BYTEA', 2679 'upload': 'VARCHAR(%(length)s)', 2680 'integer': 'INTEGER', 2681 'bigint': 'BIGINT', 2682 'float': 'FLOAT', 2683 'double': 'FLOAT8', 2684 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 2685 'date': 'DATE', 2686 'time': 'TIME', 2687 'datetime': 'TIMESTAMP', 2688 'id': 'SERIAL PRIMARY KEY', 2689 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 2690 'list:integer': 'TEXT', 2691 'list:string': 'TEXT', 2692 'list:reference': 'TEXT', 2693 'geometry': 'GEOMETRY', 2694 'geography': 'GEOGRAPHY', 2695 'big-id': 'BIGSERIAL PRIMARY KEY', 2696 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 2697 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 2698 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', 2699 2700 } 2701 2702 QUOTE_TEMPLATE = '"%s"' 2703
2704 - def varquote(self,name):
2705 return varquote_aux(name,'"%s"')
2706
2707 - def adapt(self,obj):
2708 if self.driver_name == 'psycopg2': 2709 return psycopg2_adapt(obj).getquoted() 2710 elif self.driver_name == 'pg8000': 2711 return "'%s'" % str(obj).replace("%","%%").replace("'","''") 2712 else: 2713 return "'%s'" % str(obj).replace("'","''")
2714
2715 - def sequence_name(self,table):
2716 return '%s_id_seq' % table
2717
2718 - def RANDOM(self):
2719 return 'RANDOM()'
2720
2721 - def ADD(self, first, second):
2722 t = first.type 2723 if t in ('text','string','password', 'json', 'upload','blob'): 2724 return '(%s || %s)' % (self.expand(first), self.expand(second, t)) 2725 else: 2726 return '(%s + %s)' % (self.expand(first), self.expand(second, t))
2727
2728 - def distributed_transaction_begin(self,key):
2729 return
2730
2731 - def prepare(self,key):
2732 self.execute("PREPARE TRANSACTION '%s';" % key)
2733
2734 - def commit_prepared(self,key):
2735 self.execute("COMMIT PREPARED '%s';" % key)
2736
2737 - def rollback_prepared(self,key):
2738 self.execute("ROLLBACK PREPARED '%s';" % key)
2739
2740 - def create_sequence_and_triggers(self, query, table, **args):
2741 # following lines should only be executed if table._sequence_name does not exist 2742 # self.execute('CREATE SEQUENCE %s;' % table._sequence_name) 2743 # self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \ 2744 # % (table._tablename, table._fieldname, table._sequence_name)) 2745 self.execute(query)
2746 2747 REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$') 2748
2749 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 2750 credential_decoder=IDENTITY, driver_args={}, 2751 adapter_args={}, do_connect=True, srid=4326, 2752 after_connection=None):
2753 self.db = db 2754 self.dbengine = "postgres" 2755 self.uri = uri 2756 if do_connect: self.find_driver(adapter_args,uri) 2757 self.pool_size = pool_size 2758 self.folder = folder 2759 self.db_codec = db_codec 2760 self._after_connection = after_connection 2761 self.srid = srid 2762 self.find_or_make_work_folder() 2763 ruri = uri.split('://',1)[1] 2764 m = self.REGEX_URI.match(ruri) 2765 if not m: 2766 raise SyntaxError("Invalid URI string in DAL") 2767 user = credential_decoder(m.group('user')) 2768 if not user: 2769 raise SyntaxError('User required') 2770 password = credential_decoder(m.group('password')) 2771 if not password: 2772 password = '' 2773 host = m.group('host') 2774 if not host: 2775 raise SyntaxError('Host name required') 2776 db = m.group('db') 2777 if not db: 2778 raise SyntaxError('Database name required') 2779 port = m.group('port') or '5432' 2780 sslmode = m.group('sslmode') 2781 if sslmode: 2782 msg = ("dbname='%s' user='%s' host='%s' " 2783 "port=%s password='%s' sslmode='%s'") \ 2784 % (db, user, host, port, password, sslmode) 2785 else: 2786 msg = ("dbname='%s' user='%s' host='%s' " 2787 "port=%s password='%s'") \ 2788 % (db, user, host, port, password) 2789 # choose diver according uri 2790 if self.driver: 2791 self.__version__ = "%s %s" % (self.driver.__name__, 2792 self.driver.__version__) 2793 else: 2794 self.__version__ = None 2795 def connector(msg=msg,driver_args=driver_args): 2796 return self.driver.connect(msg,**driver_args)
2797 self.connector = connector 2798 if do_connect: self.reconnect()
2799
2800 - def after_connection(self):
2801 self.connection.set_client_encoding('UTF8') 2802 self.execute("SET standard_conforming_strings=on;") 2803 self.try_json()
2804
2805 - def lastrowid(self,table):
2806 self.execute("""select currval('"%s"')""" % table._sequence_name) 2807 return int(self.cursor.fetchone()[0])
2808
2809 - def try_json(self):
2810 # check JSON data type support 2811 # (to be added to after_connection) 2812 if self.driver_name == "pg8000": 2813 supports_json = self.connection.server_version >= "9.2.0" 2814 elif (self.driver_name == "psycopg2") and \ 2815 (self.driver.__version__ >= "2.0.12"): 2816 supports_json = self.connection.server_version >= 90200 2817 elif self.driver_name == "zxJDBC": 2818 supports_json = self.connection.dbversion >= "9.2.0" 2819 else: supports_json = None 2820 if supports_json: 2821 self.types["json"] = "JSON" 2822 self.native_json = True 2823 else: LOGGER.debug("Your database version does not support the JSON data type (using TEXT instead)")
2824
2825 - def LIKE(self,first,second):
2826 args = (self.expand(first), self.expand(second,'string')) 2827 if not first.type in ('string', 'text', 'json'): 2828 return '(%s LIKE %s)' % ( 2829 self.CAST(args[0], 'CHAR(%s)' % first.length), args[1]) 2830 else: 2831 return '(%s LIKE %s)' % args
2832
2833 - def ILIKE(self,first,second):
2834 args = (self.expand(first), self.expand(second,'string')) 2835 if not first.type in ('string', 'text', 'json'): 2836 return '(%s LIKE %s)' % ( 2837 self.CAST(args[0], 'CHAR(%s)' % first.length), args[1]) 2838 else: 2839 return '(%s ILIKE %s)' % args
2840
2841 - def REGEXP(self,first,second):
2842 return '(%s ~ %s)' % (self.expand(first), 2843 self.expand(second,'string'))
2844
2845 - def STARTSWITH(self,first,second):
2846 return '(%s ILIKE %s)' % (self.expand(first), 2847 self.expand(second+'%','string'))
2848
2849 - def ENDSWITH(self,first,second):
2850 return '(%s ILIKE %s)' % (self.expand(first), 2851 self.expand('%'+second,'string'))
2852 2853 # GIS functions 2854
2855 - def ST_ASGEOJSON(self, first, second):
2856 """ 2857 http://postgis.org/docs/ST_AsGeoJSON.html 2858 """ 2859 return 'ST_AsGeoJSON(%s,%s,%s,%s)' %(second['version'], 2860 self.expand(first), second['precision'], second['options'])
2861
2862 - def ST_ASTEXT(self, first):
2863 """ 2864 http://postgis.org/docs/ST_AsText.html 2865 """ 2866 return 'ST_AsText(%s)' %(self.expand(first))
2867
2868 - def ST_X(self, first):
2869 """ 2870 http://postgis.org/docs/ST_X.html 2871 """ 2872 return 'ST_X(%s)' %(self.expand(first))
2873
2874 - def ST_Y(self, first):
2875 """ 2876 http://postgis.org/docs/ST_Y.html 2877 """ 2878 return 'ST_Y(%s)' %(self.expand(first))
2879
2880 - def ST_CONTAINS(self, first, second):
2881 """ 2882 http://postgis.org/docs/ST_Contains.html 2883 """ 2884 return 'ST_Contains(%s,%s)' %(self.expand(first), self.expand(second, first.type))
2885
2886 - def ST_DISTANCE(self, first, second):
2887 """ 2888 http://postgis.org/docs/ST_Distance.html 2889 """ 2890 return 'ST_Distance(%s,%s)' %(self.expand(first), self.expand(second, first.type))
2891
2892 - def ST_EQUALS(self, first, second):
2893 """ 2894 http://postgis.org/docs/ST_Equals.html 2895 """ 2896 return 'ST_Equals(%s,%s)' %(self.expand(first), self.expand(second, first.type))
2897
2898 - def ST_INTERSECTS(self, first, second):
2899 """ 2900 http://postgis.org/docs/ST_Intersects.html 2901 """ 2902 return 'ST_Intersects(%s,%s)' %(self.expand(first), self.expand(second, first.type))
2903
2904 - def ST_OVERLAPS(self, first, second):
2905 """ 2906 http://postgis.org/docs/ST_Overlaps.html 2907 """ 2908 return 'ST_Overlaps(%s,%s)' %(self.expand(first), self.expand(second, first.type))
2909
2910 - def ST_SIMPLIFY(self, first, second):
2911 """ 2912 http://postgis.org/docs/ST_Simplify.html 2913 """ 2914 return 'ST_Simplify(%s,%s)' %(self.expand(first), self.expand(second, 'double'))
2915
2916 - def ST_TOUCHES(self, first, second):
2917 """ 2918 http://postgis.org/docs/ST_Touches.html 2919 """ 2920 return 'ST_Touches(%s,%s)' %(self.expand(first), self.expand(second, first.type))
2921
2922 - def ST_WITHIN(self, first, second):
2923 """ 2924 http://postgis.org/docs/ST_Within.html 2925 """ 2926 return 'ST_Within(%s,%s)' %(self.expand(first), self.expand(second, first.type))
2927
    def represent(self, obj, fieldtype):
        """Convert a python value into a SQL literal for PostgreSQL.

        Spatial field types ('geometry(...)' / 'geography(...)') are
        rendered with the appropriate PostGIS text constructor; all
        other types are delegated to BaseAdapter.represent.
        """
        field_is_type = fieldtype.startswith
        if field_is_type('geo'):
            srid = 4326 # postGIS default srid for geometry
            # NOTE(review): assumes fieldtype ends with ')' and contains
            # '(' (e.g. 'geometry(public,4326)'); a bare 'geometry'
            # would raise ValueError on the unpack below -- confirm
            # callers always pass the parametrized form.
            geotype, parms = fieldtype[:-1].split('(')
            parms = parms.split(',')
            if len(parms) >= 2:
                schema, srid = parms[:2]
            if field_is_type('geometry'):
                value = "ST_GeomFromText('%s',%s)" %(obj, srid)
            elif field_is_type('geography'):
                value = "ST_GeogFromText('SRID=%s;%s')" %(srid, obj)
#             else:
#                 raise SyntaxError('Invalid field type %s' %fieldtype)
            # NOTE(review): 'value' is unbound if neither branch above
            # matched; unreachable as long as every 'geo*' type is
            # geometry or geography.
            return value
        return BaseAdapter.represent(self, obj, fieldtype)
2944
class NewPostgreSQLAdapter(PostgreSQLAdapter):
    """PostgreSQL adapter that stores list: types as native arrays
    (BIGINT[] / TEXT[]) instead of serialized text columns."""
    drivers = ('psycopg2','pg8000')

    # web2py field type -> postgres column type; differs from
    # PostgreSQLAdapter in the list:* entries, which use arrays.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'TEXT',
        'json': 'TEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BYTEA',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'SERIAL PRIMARY KEY',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'BIGINT[]',
        'list:string': 'TEXT[]',
        'list:reference': 'BIGINT[]',
        'geometry': 'GEOMETRY',
        'geography': 'GEOGRAPHY',
        'big-id': 'BIGSERIAL PRIMARY KEY',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }

    def parse_list_integers(self, value, field_type):
        # the driver already returns a native python list for arrays
        return value

    def parse_list_references(self, value, field_type):
        # field_type[5:] strips the 'list:' prefix to get the
        # referenced 'reference <table>' type
        return [self.parse_reference(r, field_type[5:]) for r in value]

    def parse_list_strings(self, value, field_type):
        # native array: no de-serialization needed
        return value

    def represent(self, obj, fieldtype):
        """Render list: values as postgres ARRAY[...] literals;
        everything else is delegated to BaseAdapter.represent."""
        field_is_type = fieldtype.startswith
        if field_is_type('list:'):
            if not obj:
                obj = []
            elif not isinstance(obj, (list, tuple)):
                obj = [obj]
            # list:string keeps str items (repr() adds the quotes);
            # list:integer / list:reference coerce to int
            if field_is_type('list:string'):
                obj = map(str,obj)
            else:
                obj = map(int,obj)
            return 'ARRAY[%s]' % ','.join(repr(item) for item in obj)
        return BaseAdapter.represent(self, obj, fieldtype)
class JDBCPostgreSQLAdapter(PostgreSQLAdapter):
    """PostgreSQL adapter for Jython, connecting through zxJDBC."""
    drivers = ('zxJDBC',)

    # user[:password]@host[:port]/db
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None ):
        """Parse the URI, build a JDBC connection string and (optionally)
        connect. Raises SyntaxError on malformed/incomplete URIs."""
        self.db = db
        self.dbengine = "postgres"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]  # strip the 'postgres://' scheme
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError("Invalid URI string in DAL")
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        port = m.group('port') or '5432'
        # positional args for zxJDBC.connect: (jdbc-url, user, password)
        msg = ('jdbc:postgresql://%s:%s/%s' % (host, port, db), user, password)
        def connector(msg=msg,driver_args=driver_args):
            return self.driver.connect(*msg,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        """Force UTF8/UNICODE client encoding and open a transaction."""
        self.connection.set_client_encoding('UTF8')
        self.execute('BEGIN;')
        self.execute("SET CLIENT_ENCODING TO 'UNICODE';")
        self.try_json()
class OracleAdapter(BaseAdapter):
    """Adapter for Oracle, accessed through cx_Oracle.

    Oracle has no autoincrement columns, so each table gets a sequence
    and a BEFORE INSERT trigger (see create_sequence_and_triggers).
    CLOB literals are rewritten into bind variables by execute().
    """
    drivers = ('cx_Oracle',)

    commit_on_alter_table = False
    # web2py field type -> Oracle column type
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR2(%(length)s)',
        'text': 'CLOB',
        'json': 'CLOB',
        'password': 'VARCHAR2(%(length)s)',
        'blob': 'CLOB',
        'upload': 'VARCHAR2(%(length)s)',
        'integer': 'INT',
        'bigint': 'NUMBER',
        'float': 'FLOAT',
        'double': 'BINARY_DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'CHAR(8)',
        'datetime': 'DATE',
        'id': 'NUMBER PRIMARY KEY',
        'reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        'big-id': 'NUMBER PRIMARY KEY',
        'big-reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        }

    def sequence_name(self,tablename):
        # name of the per-table sequence used to emulate autoincrement
        return '%s_sequence' % tablename

    def trigger_name(self,tablename):
        # name of the per-table BEFORE INSERT trigger
        return '%s_trigger' % tablename

    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        return 'dbms_random.value'

    def NOT_NULL(self,default,field_type):
        # Oracle requires DEFAULT before NOT NULL
        return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)

    def _drop(self,table,mode):
        """Drop both the table and its companion sequence."""
        sequence_name = table._sequence_name
        return ['DROP TABLE %s %s;' % (table, mode), 'DROP SEQUENCE %s;' % sequence_name]

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        """Emulate LIMIT/OFFSET with a nested ROWNUM query
        (pre-12c Oracle has no OFFSET/FETCH)."""
        if limitby:
            (lmin, lmax) = limitby
            # extend an existing WHERE, otherwise start a new one
            if len(sql_w) > 1:
                sql_w_row = sql_w + ' AND w_row > %i' % lmin
            else:
                sql_w_row = 'WHERE w_row > %i' % lmin
            return 'SELECT %s %s FROM (SELECT w_tmp.*, ROWNUM w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNUM<=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def constraint_name(self, tablename, fieldname):
        """Oracle identifiers are limited to 30 characters: shorten
        overly long generated constraint names."""
        constraint_name = BaseAdapter.constraint_name(self, tablename, fieldname)
        if len(constraint_name)>30:
            constraint_name = '%s_%s__constraint' % (tablename[:10], fieldname[:7])
        return constraint_name

    def represent_exceptions(self, obj, fieldtype):
        """Oracle-specific literal renderings; returns None to fall
        back to the generic representation."""
        if fieldtype == 'blob':
            obj = base64.b64encode(str(obj))
            # the :CLOB(...) marker is rewritten into a bind variable
            # by execute() below
            return ":CLOB('%s')" % obj
        elif fieldtype == 'date':
            if isinstance(obj, (datetime.date, datetime.datetime)):
                obj = obj.isoformat()[:10]
            else:
                obj = str(obj)
            return "to_date('%s','yyyy-mm-dd')" % obj
        elif fieldtype == 'datetime':
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat()[:19].replace('T',' ')
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10]+' 00:00:00'
            else:
                obj = str(obj)
            return "to_date('%s','yyyy-mm-dd hh24:mi:ss')" % obj
        return None

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """The URI tail after 'oracle://' is passed verbatim to
        cx_Oracle.connect(); threaded mode is enabled by default."""
        self.db = db
        self.dbengine = "oracle"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        if not 'threaded' in driver_args:
            driver_args['threaded']=True
        def connector(uri=ruri,driver_args=driver_args):
            return self.driver.connect(uri,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        # fix date/timestamp formats so string parsing elsewhere works
        self.execute("ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS';")
        self.execute("ALTER SESSION SET NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS';")

    # matches the first :CLOB('...') marker outside quoted strings
    oracle_fix = re.compile("[^']*('[^']*'[^']*)*\:(?P<clob>CLOB\('([^']+|'')*'\))")

    def execute(self, command, args=None):
        """Rewrite inline :CLOB('...') markers into numbered bind
        variables (:1, :2, ...) and strip the trailing semicolon, then
        run the statement."""
        args = args or []
        i = 1
        while True:
            m = self.oracle_fix.match(command)
            if not m:
                break
            command = command[:m.start('clob')] + str(i) + command[m.end('clob'):]
            # unescape doubled quotes inside the CLOB payload
            args.append(m.group('clob')[6:-2].replace("''", "'"))
            i += 1
        if command[-1:]==';':
            command = command[:-1]
        return self.log_execute(command, args)

    def create_sequence_and_triggers(self, query, table, **args):
        """Create the table plus the sequence/trigger pair that emulate
        an autoincrementing id (keeping the sequence in sync with
        explicitly supplied ids)."""
        tablename = table._tablename
        id_name = table._id.name
        sequence_name = table._sequence_name
        trigger_name = table._trigger_name
        self.execute(query)
        self.execute('CREATE SEQUENCE %s START WITH 1 INCREMENT BY 1 NOMAXVALUE MINVALUE -1;' % sequence_name)
        self.execute("""
CREATE OR REPLACE TRIGGER %(trigger_name)s BEFORE INSERT ON %(tablename)s FOR EACH ROW
DECLARE
    curr_val NUMBER;
    diff_val NUMBER;
    PRAGMA autonomous_transaction;
BEGIN
    IF :NEW.%(id)s IS NOT NULL THEN
        EXECUTE IMMEDIATE 'SELECT %(sequence_name)s.nextval FROM dual' INTO curr_val;
        diff_val := :NEW.%(id)s - curr_val - 1;
        IF diff_val != 0 THEN
          EXECUTE IMMEDIATE 'alter sequence %(sequence_name)s increment by '|| diff_val;
          EXECUTE IMMEDIATE 'SELECT %(sequence_name)s.nextval FROM dual' INTO curr_val;
          EXECUTE IMMEDIATE 'alter sequence %(sequence_name)s increment by 1';
        END IF;
    END IF;
    SELECT %(sequence_name)s.nextval INTO :NEW.%(id)s FROM DUAL;
END;
""" % dict(trigger_name=trigger_name, tablename=tablename,
           sequence_name=sequence_name,id=id_name))

    def lastrowid(self,table):
        """Last inserted id: current value of the table's sequence."""
        sequence_name = table._sequence_name
        self.execute('SELECT %s.currval FROM dual;' % sequence_name)
        return long(self.cursor.fetchone()[0])

    #def parse_value(self, value, field_type, blob_decode=True):
    #    if blob_decode and isinstance(value, cx_Oracle.LOB):
    #        try:
    #            value = value.read()
    #        except self.driver.ProgrammingError:
    #            # After a subsequent fetch the LOB value is not valid anymore
    #            pass
    #    return BaseAdapter.parse_value(self, value, field_type, blob_decode)

    def _fetchall(self):
        """Eagerly read LOB columns: their handles become invalid after
        the next fetch. NOTE(review): references the module-level
        cx_Oracle name directly rather than self.driver."""
        if any(x[1]==cx_Oracle.CLOB for x in self.cursor.description):
            return [tuple([(c.read() if type(c) == cx_Oracle.LOB else c) \
                               for c in r]) for r in self.cursor]
        else:
            return self.cursor.fetchall()
3220
class MSSQLAdapter(BaseAdapter):
    """Adapter for Microsoft SQL Server through pyodbc.

    Accepted URI forms:
        mssql://dsn                                  (a raw ODBC DSN)
        mssql://user:password@host[:port]/db[?k=v&...]
    """
    drivers = ('pyodbc',)
    T_SEP = 'T'  # date/time separator in datetime literals

    QUOTE_TEMPLATE = "[%s]"  # MSSQL brackets identifiers

    # web2py field type -> T-SQL column type
    types = {
        'boolean': 'BIT',
        'string': 'VARCHAR(%(length)s)',
        'text': 'TEXT',
        'json': 'TEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'IMAGE',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATETIME',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'INT IDENTITY PRIMARY KEY',
        'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'TEXT',
        'list:string': 'TEXT',
        'list:reference': 'TEXT',
        'geometry': 'geometry',
        'geography': 'geography',
        'big-id': 'BIGINT IDENTITY PRIMARY KEY',
        'big-reference': 'BIGINT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        }

    def concat_add(self,tablename):
        """SQL fragment used to append ADD COLUMN clauses."""
        return '; ALTER TABLE %s ADD ' % tablename

    def varquote(self,name):
        """Quote an identifier with [brackets] when necessary."""
        return varquote_aux(name,'[%s]')

    def EXTRACT(self,field,what):
        """MSSQL has no EXTRACT; use DATEPART."""
        return "DATEPART(%s,%s)" % (what, self.expand(field))

    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        return 'NEWID()'

    def ALLOW_NULL(self):
        return ' NULL'

    def CAST(self, first, second):
        return first # apparently no cast necessary in MSSQL

    def SUBSTRING(self,field,parameters):
        return 'SUBSTRING(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1])

    def PRIMARY_KEY(self,key):
        return 'PRIMARY KEY CLUSTERED (%s)' % key

    def AGGREGATE(self, first, what):
        # MSSQL spells LENGTH as LEN
        if what == 'LENGTH':
            what = 'LEN'
        return "%s(%s)" % (what, self.expand(first))

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        """Only the upper bound is pushed into SQL (TOP); the lower
        bound is applied client-side by rowslice()."""
        if limitby:
            (lmin, lmax) = limitby
            sql_s += ' TOP %i' % lmax
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    TRUE = 1
    FALSE = 0

    REGEX_DSN = re.compile('^(?P<dsn>.+)$')
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?(?P<urlargs>.*))?$')
    REGEX_ARGPATTERN = re.compile('(?P<argkey>[^=]+)=(?P<argvalue>[^&]*)')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, srid=4326,
                 after_connection=None):
        """Parse the URI into an ODBC connection string and optionally
        connect. Raises SyntaxError on malformed/incomplete URIs."""
        self.db = db
        self.dbengine = "mssql"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.srid = srid  # default srid for spatial fields
        self.find_or_make_work_folder()
        # ## read: http://bytes.com/groups/python/460325-cx_oracle-utf8
        ruri = uri.split('://',1)[1]
        if '@' not in ruri:
            # no credentials: the remainder is a raw ODBC DSN
            try:
                m = self.REGEX_DSN.match(ruri)
                if not m:
                    raise SyntaxError(
                        'Parsing uri string(%s) has no result' % self.uri)
                dsn = m.group('dsn')
                if not dsn:
                    raise SyntaxError('DSN required')
            except SyntaxError:
                e = sys.exc_info()[1]
                LOGGER.error('NdGpatch error')
                raise e
            # was cnxn = 'DSN=%s' % dsn
            cnxn = dsn
        else:
            m = self.REGEX_URI.match(ruri)
            if not m:
                raise SyntaxError(
                    "Invalid URI string in DAL: %s" % self.uri)
            user = credential_decoder(m.group('user'))
            if not user:
                raise SyntaxError('User required')
            password = credential_decoder(m.group('password'))
            if not password:
                password = ''
            host = m.group('host')
            if not host:
                raise SyntaxError('Host name required')
            db = m.group('db')
            if not db:
                raise SyntaxError('Database name required')
            port = m.group('port') or '1433'
            # Parse the optional url name-value arg pairs after the '?'
            # (in the form of arg1=value1&arg2=value2&...)
            # Default values (drivers like FreeTDS insist on uppercase parameter keys)
            argsdict = { 'DRIVER':'{SQL Server}' }
            urlargs = m.group('urlargs') or ''
            for argmatch in self.REGEX_ARGPATTERN.finditer(urlargs):
                argsdict[str(argmatch.group('argkey')).upper()] = argmatch.group('argvalue')
            urlargs = ';'.join(['%s=%s' % (ak, av) for (ak, av) in argsdict.iteritems()])
            cnxn = 'SERVER=%s;PORT=%s;DATABASE=%s;UID=%s;PWD=%s;%s' \
                % (host, port, db, user, password, urlargs)
        def connector(cnxn=cnxn,driver_args=driver_args):
            return self.driver.connect(cnxn,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def lastrowid(self,table):
        """Id of the last inserted row (scoped to avoid trigger ids)."""
        #self.execute('SELECT @@IDENTITY;')
        self.execute('SELECT SCOPE_IDENTITY();')
        return long(self.cursor.fetchone()[0])

    def rowslice(self,rows,minimum=0,maximum=None):
        """Apply the lower limitby bound client-side (see select_limitby)."""
        if maximum is None:
            return rows[minimum:]
        return rows[minimum:maximum]

    def EPOCH(self, first):
        return "DATEDIFF(second, '1970-01-01 00:00:00', %s)" % self.expand(first)

    def CONCAT(self, *items):
        return '(%s)' % ' + '.join(self.expand(x,'string') for x in items)

    # GIS Spatial Extensions

    # No STAsGeoJSON in MSSQL

    def ST_ASTEXT(self, first):
        return '%s.STAsText()' %(self.expand(first))

    def ST_CONTAINS(self, first, second):
        return '%s.STContains(%s)=1' %(self.expand(first), self.expand(second, first.type))

    def ST_DISTANCE(self, first, second):
        return '%s.STDistance(%s)' %(self.expand(first), self.expand(second, first.type))

    def ST_EQUALS(self, first, second):
        return '%s.STEquals(%s)=1' %(self.expand(first), self.expand(second, first.type))

    def ST_INTERSECTS(self, first, second):
        return '%s.STIntersects(%s)=1' %(self.expand(first), self.expand(second, first.type))

    def ST_OVERLAPS(self, first, second):
        return '%s.STOverlaps(%s)=1' %(self.expand(first), self.expand(second, first.type))

    # no STSimplify in MSSQL

    def ST_TOUCHES(self, first, second):
        return '%s.STTouches(%s)=1' %(self.expand(first), self.expand(second, first.type))

    def ST_WITHIN(self, first, second):
        return '%s.STWithin(%s)=1' %(self.expand(first), self.expand(second, first.type))

    def represent(self, obj, fieldtype):
        """Convert a python value into a SQL literal.

        Spatial types are rendered with ST*GeomFromText; everything
        else is delegated to BaseAdapter.represent.

        Fixes over the previous version:
        - a bare 'geometry'/'geography' field type (no '(...)' part)
          used to raise ValueError on the tuple unpack of
          fieldtype[:-1].split('(');
        - the geography branch tested `fieldtype == 'geography'` so it
          never matched a parametrized 'geography(...)' type, and the
          exact match it did accept then crashed on the same unpack;
        - unreachable duplicated code after the geography return was
          removed.
        """
        field_is_type = fieldtype.startswith
        if field_is_type('geometry'):
            srid = 0 # MS SQL default srid for geometry
            if '(' in fieldtype:
                # NOTE(review): as before, everything between the
                # parentheses is passed through as the srid argument.
                geotype, parms = fieldtype[:-1].split('(')
                if parms:
                    srid = parms
            return "geometry::STGeomFromText('%s',%s)" %(obj, srid)
        elif field_is_type('geography'):
            srid = 4326 # MS SQL default srid for geography
            if '(' in fieldtype:
                geotype, parms = fieldtype[:-1].split('(')
                if parms:
                    srid = parms
            return "geography::STGeomFromText('%s',%s)" %(obj, srid)
        return BaseAdapter.represent(self, obj, fieldtype)
class MSSQL3Adapter(MSSQLAdapter):
    """ experimental support for pagination in MSSQL"""
    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        """Emulate LIMIT/OFFSET with ROW_NUMBER() OVER (ORDER BY ...).

        When the offset is 0 a plain TOP is used. Otherwise the select
        list is aliased (f_0, f_1, ...) in an inner query that numbers
        the rows, and the outer query filters on the row number.
        NOTE(review): relies on sql_o containing an 'ORDER BY ' clause
        when lmin > 0; with no orderby the string surgery below yields
        an empty ORDER BY expression.
        """
        if limitby:
            (lmin, lmax) = limitby
            if lmin == 0:
                sql_s += ' TOP %i' % lmax
                return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
            lmin += 1  # BETWEEN is inclusive and ROW_NUMBER starts at 1
            # split sql_o into the ORDER BY expression and whatever
            # (e.g. GROUP BY) precedes it
            sql_o_inner = sql_o[sql_o.find('ORDER BY ')+9:]
            sql_g_inner = sql_o[:sql_o.find('ORDER BY ')]
            sql_f_outer = ['f_%s' % f for f in range(len(sql_f.split(',')))]
            sql_f_inner = [f for f in sql_f.split(',')]
            sql_f_iproxy = ['%s AS %s' % (o, n) for (o, n) in zip(sql_f_inner, sql_f_outer)]
            sql_f_iproxy = ', '.join(sql_f_iproxy)
            sql_f_oproxy = ', '.join(sql_f_outer)
            return 'SELECT %s %s FROM (SELECT %s ROW_NUMBER() OVER (ORDER BY %s) AS w_row, %s FROM %s%s%s) TMP WHERE w_row BETWEEN %i AND %s;' % (sql_s,sql_f_oproxy,sql_s,sql_f,sql_f_iproxy,sql_t,sql_w,sql_g_inner,lmin,lmax)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s,sql_f,sql_t,sql_w,sql_o)
    def rowslice(self,rows,minimum=0,maximum=None):
        # pagination is done fully in SQL: no client-side slicing
        return rows
3452
class MSSQL4Adapter(MSSQLAdapter):
    """ support for true pagination in MSSQL >= 2012"""

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        """Use OFFSET ... FETCH NEXT (SQL Server 2012+) for paging; a
        zero offset keeps the slightly faster TOP form."""
        if limitby:
            lmin, lmax = limitby
            if lmin == 0:
                # TOP is still slightly faster, especially because
                # web2py's default when fetching references does not
                # specify an orderby clause
                sql_s += ' TOP %i' % lmax
            else:
                # OFFSET/FETCH is only legal after an ORDER BY; if the
                # caller gave none, the developer chose its own poison,
                # so order randomly
                if not sql_o:
                    sql_o += ' ORDER BY %s' % self.RANDOM()
                window = (lmin, lmax - lmin)
                sql_o += ' OFFSET %i ROWS FETCH NEXT %i ROWS ONLY' % window
        return 'SELECT %s %s FROM %s%s%s;' % \
            (sql_s, sql_f, sql_t, sql_w, sql_o)

    def rowslice(self, rows, minimum=0, maximum=None):
        """Paging happens entirely in SQL: no client-side slicing."""
        return rows
3475
class MSSQL2Adapter(MSSQLAdapter):
    """MSSQL adapter using unicode column types (NVARCHAR/NTEXT)."""
    drivers = ('pyodbc',)

    # same as MSSQLAdapter.types but with N-prefixed unicode types
    types = {
        'boolean': 'CHAR(1)',
        'string': 'NVARCHAR(%(length)s)',
        'text': 'NTEXT',
        'json': 'NTEXT',
        'password': 'NVARCHAR(%(length)s)',
        'blob': 'IMAGE',
        'upload': 'NVARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATETIME',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'INT IDENTITY PRIMARY KEY',
        'reference': 'INT, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'NTEXT',
        'list:string': 'NTEXT',
        'list:reference': 'NTEXT',
        'big-id': 'BIGINT IDENTITY PRIMARY KEY',
        'big-reference': 'BIGINT, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        }

    def represent(self, obj, fieldtype):
        """Prefix quoted text literals with N for unicode columns."""
        value = BaseAdapter.represent(self, obj, fieldtype)
        if fieldtype in ('string','text', 'json') and value[:1]=="'":
            value = 'N'+value
        return value

    def execute(self,a):
        # statements are built as Python 2 byte strings; decode to
        # unicode before handing them to pyodbc
        return self.log_execute(a.decode('utf8'))
class VerticaAdapter(MSSQLAdapter):
    """Adapter for HP Vertica through pyodbc; reuses the MSSQL URI
    parsing but overrides the SQL dialect differences."""
    drivers = ('pyodbc',)
    T_SEP = ' '  # Vertica separates date and time with a space

    # web2py field type -> Vertica column type
    types = {
        'boolean': 'BOOLEAN',
        'string': 'VARCHAR(%(length)s)',
        'text': 'BYTEA',
        'json': 'VARCHAR(%(length)s)',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BYTEA',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'DOUBLE PRECISION',
        'decimal': 'DECIMAL(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'DATETIME',
        'id': 'IDENTITY',
        'reference': 'INT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'BYTEA',
        'list:string': 'BYTEA',
        'list:reference': 'BYTEA',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }

    def EXTRACT(self, first, what):
        """Vertica uses DATE_PART instead of DATEPART."""
        return "DATE_PART('%s', TIMESTAMP %s)" % (what, self.expand(first))

    def _truncate(self, table, mode=''):
        tablename = table._tablename
        return ['TRUNCATE %s %s;' % (tablename, mode or '')]

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        """Vertica supports plain LIMIT/OFFSET (unlike MSSQL)."""
        if limitby:
            (lmin, lmax) = limitby
            sql_o += ' LIMIT %i OFFSET %i' % (lmax - lmin, lmin)
        return 'SELECT %s %s FROM %s%s%s;' % \
            (sql_s, sql_f, sql_t, sql_w, sql_o)

    def lastrowid(self,table):
        self.execute('SELECT LAST_INSERT_ID();')
        return long(self.cursor.fetchone()[0])

    def execute(self, a):
        # no unicode decoding needed (unlike MSSQL2Adapter)
        return self.log_execute(a)
class SybaseAdapter(MSSQLAdapter):
    """Adapter for Sybase; reuses the MSSQL dialect, differing mainly
    in how the connection string (dsn) is built."""
    drivers = ('Sybase',)

    # web2py field type -> Sybase column type
    types = {
        'boolean': 'BIT',
        'string': 'CHAR VARYING(%(length)s)',
        'text': 'TEXT',
        'json': 'TEXT',
        'password': 'CHAR VARYING(%(length)s)',
        'blob': 'IMAGE',
        'upload': 'CHAR VARYING(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATETIME',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'INT IDENTITY PRIMARY KEY',
        'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'TEXT',
        'list:string': 'TEXT',
        'list:reference': 'TEXT',
        'geometry': 'geometry',
        'geography': 'geography',
        'big-id': 'BIGINT IDENTITY PRIMARY KEY',
        'big-reference': 'BIGINT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        }

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, srid=4326,
                 after_connection=None):
        """Parse the URI (DSN form or user:password@host form) and
        optionally connect.

        NOTE(review): the credentialed branch matches REGEX_URI against
        the full `uri` (including the 'sybase://' scheme) while every
        sibling adapter matches the scheme-stripped `ruri` -- this looks
        like a bug; confirm before relying on this code path. Also note
        the DSN branch never defines `dsn` used by connector() below.
        """
        self.db = db
        self.dbengine = "sybase"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.srid = srid
        self.find_or_make_work_folder()
        # ## read: http://bytes.com/groups/python/460325-cx_oracle-utf8
        ruri = uri.split('://',1)[1]
        if '@' not in ruri:
            try:
                m = self.REGEX_DSN.match(ruri)
                if not m:
                    raise SyntaxError(
                        'Parsing uri string(%s) has no result' % self.uri)
                dsn = m.group('dsn')
                if not dsn:
                    raise SyntaxError('DSN required')
            except SyntaxError:
                e = sys.exc_info()[1]
                LOGGER.error('NdGpatch error')
                raise e
        else:
            m = self.REGEX_URI.match(uri)
            if not m:
                raise SyntaxError(
                    "Invalid URI string in DAL: %s" % self.uri)
            user = credential_decoder(m.group('user'))
            if not user:
                raise SyntaxError('User required')
            password = credential_decoder(m.group('password'))
            if not password:
                password = ''
            host = m.group('host')
            if not host:
                raise SyntaxError('Host name required')
            db = m.group('db')
            if not db:
                raise SyntaxError('Database name required')
            port = m.group('port') or '1433'

            dsn = 'sybase:host=%s:%s;dbname=%s' % (host,port,db)

            # NOTE(review): user/password were already passed through
            # credential_decoder above; this decodes them a second time.
            driver_args.update(user = credential_decoder(user),
                               password = credential_decoder(password))

        def connector(dsn=dsn,driver_args=driver_args):
            return self.driver.connect(dsn,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()
3656 3657 -class FireBirdAdapter(BaseAdapter):
3658 drivers = ('kinterbasdb','firebirdsql','fdb','pyodbc') 3659 3660 commit_on_alter_table = False 3661 support_distributed_transaction = True 3662 types = { 3663 'boolean': 'CHAR(1)', 3664 'string': 'VARCHAR(%(length)s)', 3665 'text': 'BLOB SUB_TYPE 1', 3666 'json': 'BLOB SUB_TYPE 1', 3667 'password': 'VARCHAR(%(length)s)', 3668 'blob': 'BLOB SUB_TYPE 0', 3669 'upload': 'VARCHAR(%(length)s)', 3670 'integer': 'INTEGER', 3671 'bigint': 'BIGINT', 3672 'float': 'FLOAT', 3673 'double': 'DOUBLE PRECISION', 3674 'decimal': 'DECIMAL(%(precision)s,%(scale)s)', 3675 'date': 'DATE', 3676 'time': 'TIME', 3677 'datetime': 'TIMESTAMP', 3678 'id': 'INTEGER PRIMARY KEY', 3679 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 3680 'list:integer': 'BLOB SUB_TYPE 1', 3681 'list:string': 'BLOB SUB_TYPE 1', 3682 'list:reference': 'BLOB SUB_TYPE 1', 3683 'big-id': 'BIGINT PRIMARY KEY', 3684 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 3685 } 3686
3687 - def sequence_name(self,tablename):
3688 return 'genid_%s' % tablename
3689
3690 - def trigger_name(self,tablename):
3691 return 'trg_id_%s' % tablename
3692
3693 - def RANDOM(self):
3694 return 'RAND()'
3695
3696 - def EPOCH(self, first):
3697 return "DATEDIFF(second, '1970-01-01 00:00:00', %s)" % self.expand(first)
3698
3699 - def NOT_NULL(self,default,field_type):
3700 return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)
3701
3702 - def SUBSTRING(self,field,parameters):
3703 return 'SUBSTRING(%s from %s for %s)' % (self.expand(field), parameters[0], parameters[1])
3704
3705 - def LENGTH(self, first):
3706 return "CHAR_LENGTH(%s)" % self.expand(first)
3707
3708 - def CONTAINS(self,first,second,case_sensitive=False):
3709 if first.type.startswith('list:'): 3710 second = Expression(None,self.CONCAT('|',Expression( 3711 None,self.REPLACE(second,('|','||'))),'|')) 3712 return '(%s CONTAINING %s)' % (self.expand(first), 3713 self.expand(second, 'string'))
3714
3715 - def _drop(self,table,mode):
3716 sequence_name = table._sequence_name 3717 return ['DROP TABLE %s %s;' % (table, mode), 'DROP GENERATOR %s;' % sequence_name]
3718
3719 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
3720 if limitby: 3721 (lmin, lmax) = limitby 3722 sql_s = ' FIRST %i SKIP %i %s' % (lmax - lmin, lmin, sql_s) 3723 return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
3724
3725 - def _truncate(self,table,mode = ''):
3726 return ['DELETE FROM %s;' % table._tablename, 3727 'SET GENERATOR %s TO 0;' % table._sequence_name]
# firebird://user:password@host[:port]/db[?set_encoding=charset]
REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+?)(\?set_encoding=(?P<charset>\w+))?$')

def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
             credential_decoder=IDENTITY, driver_args={},
             adapter_args={}, do_connect=True, after_connection=None):
    """Parse a firebird:// URI and prepare (optionally open) the connection.

    Raises SyntaxError when the URI is malformed or user/host/db missing.
    NOTE(review): the mutable default ``driver_args`` is mutated in place
    and captured by the connector closure — shared across calls that rely
    on the default; presumably deliberate in this codebase, verify.
    """
    self.db = db
    self.dbengine = "firebird"
    self.uri = uri
    if do_connect: self.find_driver(adapter_args,uri)
    self.pool_size = pool_size
    self.folder = folder
    self.db_codec = db_codec
    self._after_connection = after_connection
    self.find_or_make_work_folder()
    ruri = uri.split('://',1)[1]
    m = self.REGEX_URI.match(ruri)
    if not m:
        raise SyntaxError("Invalid URI string in DAL: %s" % self.uri)
    user = credential_decoder(m.group('user'))
    if not user:
        raise SyntaxError('User required')
    password = credential_decoder(m.group('password'))
    if not password:
        password = ''
    host = m.group('host')
    if not host:
        raise SyntaxError('Host name required')
    port = int(m.group('port') or 3050)  # 3050 is Firebird's default port
    db = m.group('db')
    if not db:
        raise SyntaxError('Database name required')
    charset = m.group('charset') or 'UTF8'
    # NOTE(review): user/password already went through credential_decoder
    # above; they are decoded a second time here (harmless with the
    # IDENTITY default, double-decodes with a custom decoder) — confirm.
    driver_args.update(dsn='%s/%s:%s' % (host,port,db),
                       user = credential_decoder(user),
                       password = credential_decoder(password),
                       charset = charset)

    def connector(driver_args=driver_args):
        return self.driver.connect(**driver_args)
    self.connector = connector
    if do_connect: self.reconnect()
3770
def create_sequence_and_triggers(self, query, table, **args):
    """Create the table plus its auto-increment machinery.

    Firebird (before IDENTITY columns) emulates auto-increment ids with
    a generator and a BEFORE INSERT trigger that fills ``new.id`` from
    ``gen_id()`` when the caller did not supply one.
    """
    tablename = table._tablename
    sequence_name = table._sequence_name
    trigger_name = table._trigger_name
    self.execute(query)
    self.execute('create generator %s;' % sequence_name)
    self.execute('set generator %s to 0;' % sequence_name)
    self.execute('create trigger %s for %s active before insert position 0 as\nbegin\nif(new.id is null) then\nbegin\nnew.id = gen_id(%s, 1);\nend\nend;' % (trigger_name, tablename, sequence_name))
3779
def lastrowid(self,table):
    """Return the id assigned by the last insert on *table*.

    gen_id(seq, 0) reads the generator's current value without
    incrementing it, i.e. the value the insert trigger just used.
    """
    sequence_name = table._sequence_name
    self.execute('SELECT gen_id(%s, 0) FROM rdb$database' % sequence_name)
    return long(self.cursor.fetchone()[0])  # Python 2 long
3784
class FireBirdEmbeddedAdapter(FireBirdAdapter):
    """Firebird embedded: connects to a database file path, not host:port."""

    drivers = ('kinterbasdb','firebirdsql','fdb','pyodbc')

    # user:password@/path/to/file.fdb[?set_encoding=charset]
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<path>[^\?]+)(\?set_encoding=(?P<charset>\w+))?$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Parse the embedded-Firebird URI; raises SyntaxError when malformed."""
        self.db = db
        self.dbengine = "firebird"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError(
                "Invalid URI string in DAL: %s" % self.uri)
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        pathdb = m.group('path')
        if not pathdb:
            raise SyntaxError('Path required')
        charset = m.group('charset')
        if not charset:
            charset = 'UTF8'
        host = ''  # embedded engine: no network host
        # NOTE(review): credentials are decoded a second time here, as in
        # FireBirdAdapter.__init__ — harmless with IDENTITY, verify otherwise.
        driver_args.update(host=host,
                           database=pathdb,
                           user=credential_decoder(user),
                           password=credential_decoder(password),
                           charset=charset)

        def connector(driver_args=driver_args):
            return self.driver.connect(**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()
3831
class InformixAdapter(BaseAdapter):
    """Adapter for IBM Informix (9.0+; see InformixSEAdapter for SE)."""

    drivers = ('informixdb',)

    # Mapping of DAL field types to Informix column types.
    # NOTE(review): the BLOB SUB_TYPE entries look copied from the
    # Firebird adapter — confirm they are valid for Informix.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'BLOB SUB_TYPE 1',
        'json': 'BLOB SUB_TYPE 1',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB SUB_TYPE 0',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'DOUBLE PRECISION',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'SERIAL',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'BLOB SUB_TYPE 1',
        'list:string': 'BLOB SUB_TYPE 1',
        'list:reference': 'BLOB SUB_TYPE 1',
        'big-id': 'BIGSERIAL',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': 'REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s CONSTRAINT FK_%(table_name)s_%(field_name)s',
        'reference TFK': 'FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s CONSTRAINT TFK_%(table_name)s_%(field_name)s',
        }

    def RANDOM(self):
        # Informix random-ordering function
        return 'Random()'

    def NOT_NULL(self,default,field_type):
        return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        """Build a SELECT; SKIP/FIRST pagination gated on server version."""
        if limitby:
            (lmin, lmax) = limitby
            fetch_amt = lmax - lmin
            dbms_version = int(self.connection.dbms_version.split('.')[0])
            if lmin and (dbms_version >= 10):
                # Requires Informix 10.0+
                sql_s += ' SKIP %d' % (lmin, )
            if fetch_amt and (dbms_version >= 9):
                # Requires Informix 9.0+
                sql_s += ' FIRST %d' % (fetch_amt, )
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def represent_exceptions(self, obj, fieldtype):
        """Literal SQL for date/datetime values; None defers to the base class."""
        if fieldtype == 'date':
            if isinstance(obj, (datetime.date, datetime.datetime)):
                obj = obj.isoformat()[:10]
            else:
                obj = str(obj)
            return "to_date('%s','%%Y-%%m-%%d')" % obj
        elif fieldtype == 'datetime':
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat()[:19].replace('T',' ')
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10]+' 00:00:00'
            else:
                obj = str(obj)
            return "to_date('%s','%%Y-%%m-%%d %%H:%%M:%%S')" % obj
        return None

    # informix://user:password@host[:port]/db
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Parse the informix:// URI; raises SyntaxError when malformed."""
        self.db = db
        self.dbengine = "informix"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError(
                "Invalid URI string in DAL: %s" % self.uri)
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        # NOTE(review): second credential_decoder pass (double-decode with
        # a non-IDENTITY decoder) — mirrors the other adapters, verify.
        user = credential_decoder(user)
        password = credential_decoder(password)
        dsn = '%s@%s' % (db,host)
        driver_args.update(user=user,password=password,autocommit=True)
        def connector(dsn=dsn,driver_args=driver_args):
            return self.driver.connect(dsn,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def execute(self,command):
        # informixdb rejects trailing semicolons; strip before executing
        if command[-1:]==';':
            command = command[:-1]
        return self.log_execute(command)

    def lastrowid(self,table):
        # sqlerrd[1] holds the SERIAL value of the last insert
        return self.cursor.sqlerrd[1]
3945
class InformixSEAdapter(InformixAdapter):
    """Adapter for Informix Standard Engine (work in progress).

    Informix SE lacks FIRST/SKIP, so ``limitby`` is ignored when the SQL
    is built and the range is applied client-side by ``rowslice``.
    """

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # limitby intentionally unused here; slicing happens in rowslice()
        pieces = (sql_s, sql_f, sql_t, sql_w, sql_o)
        return 'SELECT %s %s FROM %s%s%s;' % pieces

    def rowslice(self, rows, minimum=0, maximum=None):
        # emulate LIMIT/OFFSET by slicing the already-fetched rows
        return rows[minimum:] if maximum is None else rows[minimum:maximum]
3957
class DB2Adapter(BaseAdapter):
    """Adapter for IBM DB2 via pyodbc."""

    drivers = ('pyodbc',)

    # Mapping of DAL field types to DB2 column types.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'CLOB',
        'json': 'CLOB',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'REAL',
        'double': 'DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL',
        'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        'big-id': 'BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL',
        'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        }

    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        return 'RAND()'

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        """Build a SELECT; DB2 supports FETCH FIRST but no offset here.

        NOTE(review): only lmax is used — the lower bound is applied
        client-side via rowslice(), see below.
        """
        if limitby:
            (lmin, lmax) = limitby
            sql_o += ' FETCH FIRST %i ROWS ONLY' % lmax
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def represent_exceptions(self, obj, fieldtype):
        """Literal SQL for blob/datetime values; None defers to base class."""
        if fieldtype == 'blob':
            obj = base64.b64encode(str(obj))
            return "BLOB('%s')" % obj
        elif fieldtype == 'datetime':
            # DB2 timestamp literal format: YYYY-MM-DD-HH.MM.SS
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat()[:19].replace('T','-').replace(':','.')
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10]+'-00.00.00'
            return "'%s'" % obj
        return None

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Everything after 'db2://' is passed to pyodbc as the DSN string."""
        self.db = db
        self.dbengine = "db2"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://', 1)[1]
        def connector(cnxn=ruri,driver_args=driver_args):
            return self.driver.connect(cnxn,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def execute(self,command):
        # pyodbc/DB2 rejects a trailing semicolon; strip it first
        if command[-1:]==';':
            command = command[:-1]
        return self.log_execute(command)

    def lastrowid(self,table):
        self.execute('SELECT DISTINCT IDENTITY_VAL_LOCAL() FROM %s;' % table)
        return long(self.cursor.fetchone()[0])  # Python 2 long

    def rowslice(self,rows,minimum=0,maximum=None):
        # apply the limitby lower bound client-side
        if maximum is None:
            return rows[minimum:]
        return rows[minimum:maximum]
4043
class TeradataAdapter(BaseAdapter):
    """Adapter for Teradata via pyodbc."""

    drivers = ('pyodbc',)

    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'VARCHAR(2000)',
        'json': 'VARCHAR(4000)',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'REAL',
        'double': 'DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        # Modified Constraint syntax for Teradata.
        # Teradata does not support ON DELETE.
        'id': 'INT GENERATED ALWAYS AS IDENTITY',  # Teradata Specific
        'reference': 'INT',
        'list:integer': 'VARCHAR(4000)',
        'list:string': 'VARCHAR(4000)',
        'list:reference': 'VARCHAR(4000)',
        'big-id': 'BIGINT GENERATED ALWAYS AS IDENTITY',  # Teradata Specific
        'big-reference': 'BIGINT',
        'reference FK': ' REFERENCES %(foreign_key)s',
        'reference TFK': ' FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s)',
        }

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Everything after 'teradata://' is passed to pyodbc as the DSN."""
        self.db = db
        self.dbengine = "teradata"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://', 1)[1]
        def connector(cnxn=ruri,driver_args=driver_args):
            return self.driver.connect(cnxn,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def close(self,action='commit',really=True):
        # Teradata does not implicitly close off the cursor
        # leading to SQL_ACTIVE_STATEMENTS limit errors
        self.cursor.close()
        ConnectionPool.close(self, action, really)

    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'

    # Similar to MSSQL, Teradata can't specify a range (for Pageby)
    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        if limitby:
            (lmin, lmax) = limitby
            sql_s += ' TOP %i' % lmax  # only the upper bound is honored
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def _truncate(self, table, mode=''):
        # Teradata's fast full-table delete; mode accepted but unused
        tablename = table._tablename
        return ['DELETE FROM %s ALL;' % (tablename)]
# Placeholder sequence name embedded in the Ingres type map; it is
# substituted with a per-table sequence at CREATE TABLE time
# (see IngresAdapter.create_sequence_and_triggers).
INGRES_SEQNAME='ii***lineitemsequence' # NOTE invalid database object name
# (ANSI-SQL wants this form of name
# to be a delimited identifier)

class IngresAdapter(BaseAdapter):
    """Adapter for Ingres via pyodbc."""

    drivers = ('pyodbc',)

    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'CLOB',
        'json': 'CLOB',
        'password': 'VARCHAR(%(length)s)',  ## Not sure what this contains utf8 or nvarchar. Or even bytes?
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',  ## FIXME utf8 or nvarchar... or blob? what is this type?
        'integer': 'INTEGER4', # or int8...
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'ANSIDATE',
        'time': 'TIME WITHOUT TIME ZONE',
        'datetime': 'TIMESTAMP WITHOUT TIME ZONE',
        'id': 'int not null unique with default next value for %s' % INGRES_SEQNAME,
        'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        'big-id': 'bigint not null unique with default next value for %s' % INGRES_SEQNAME,
        'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO
        }

    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        return 'RANDOM()'

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        """Build a SELECT using FIRST (count) and OFFSET (Ingres 9.2+)."""
        if limitby:
            (lmin, lmax) = limitby
            fetch_amt = lmax - lmin
            if fetch_amt:
                sql_s += ' FIRST %d ' % (fetch_amt, )
            if lmin:
                # Requires Ingres 9.2+
                sql_o += ' OFFSET %d' % (lmin, )
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Accept either a full ODBC connection string or a bare local dbname."""
        self.db = db
        self.dbengine = "ingres"
        # NOTE(review): references the module-global pyodbc directly while
        # find_driver() below also sets self.driver — looks like a leftover.
        self._driver = pyodbc
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        connstr = uri.split(':', 1)[1]
        # Simple URI processing
        connstr = connstr.lstrip()
        while connstr.startswith('/'):
            connstr = connstr[1:]
        if '=' in connstr:
            # Assume we have a regular ODBC connection string and just use it
            ruri = connstr
        else:
            # Assume only (local) dbname is passed in with OS auth
            database_name = connstr
            default_driver_name = 'Ingres'
            vnode = '(local)'
            servertype = 'ingres'
            ruri = 'Driver={%s};Server=%s;Database=%s' % (default_driver_name, vnode, database_name)
        def connector(cnxn=ruri,driver_args=driver_args):
            return self.driver.connect(cnxn,**driver_args)

        self.connector = connector

        # TODO if version is >= 10, set types['id'] to Identity column, see http://community.actian.com/wiki/Using_Ingres_Identity_Columns
        if do_connect: self.reconnect()

    def create_sequence_and_triggers(self, query, table, **args):
        # post create table auto inc code (if needed)
        # modify table to btree for performance....
        # Older Ingres releases could use rule/trigger like Oracle above.
        if hasattr(table,'_primarykey'):
            modify_tbl_sql = 'modify %s to btree unique on %s' % \
                (table._tablename,
                 ', '.join(["'%s'" % x for x in table.primarykey]))
            self.execute(modify_tbl_sql)
        else:
            tmp_seqname='%s_iisq' % table._tablename
            query=query.replace(INGRES_SEQNAME, tmp_seqname)
            self.execute('create sequence %s' % tmp_seqname)
            self.execute(query)
            self.execute('modify %s to btree unique on %s' % (table._tablename, 'id'))

    def lastrowid(self,table):
        # reads the per-table sequence created in create_sequence_and_triggers
        tmp_seqname='%s_iisq' % table
        self.execute('select current value for %s' % tmp_seqname)
        return long(self.cursor.fetchone()[0]) # don't really need int type cast here...
4223
class IngresUnicodeAdapter(IngresAdapter):
    """Ingres adapter using Unicode column types (NVARCHAR/NCLOB)."""

    drivers = ('pyodbc',)

    # Same mapping as IngresAdapter but with national (Unicode) types.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'NVARCHAR(%(length)s)',
        'text': 'NCLOB',
        'json': 'NCLOB',
        'password': 'NVARCHAR(%(length)s)',  ## Not sure what this contains utf8 or nvarchar. Or even bytes?
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',  ## FIXME utf8 or nvarchar... or blob? what is this type?
        'integer': 'INTEGER4', # or int8...
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'ANSIDATE',
        'time': 'TIME WITHOUT TIME ZONE',
        'datetime': 'TIMESTAMP WITHOUT TIME ZONE',
        'id': 'INTEGER4 not null unique with default next value for %s'% INGRES_SEQNAME,
        'reference': 'INTEGER4, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'NCLOB',
        'list:string': 'NCLOB',
        'list:reference': 'NCLOB',
        'big-id': 'BIGINT not null unique with default next value for %s'% INGRES_SEQNAME,
        'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO
        }
4255
class SAPDBAdapter(BaseAdapter):
    """Adapter for SAP DB / MaxDB (experimental)."""

    drivers = ('sapdb',)

    support_distributed_transaction = False
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'LONG',
        'json': 'LONG',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'LONG',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'DOUBLE PRECISION',
        'decimal': 'FIXED(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INT PRIMARY KEY',
        'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'LONG',
        'list:string': 'LONG',
        'list:reference': 'LONG',
        'big-id': 'BIGINT PRIMARY KEY',
        'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }

    def sequence_name(self,table):
        return '%s_id_Seq' % table

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        """Paginate via a nested ROWNO subquery.

        NOTE(review): the limitby branch interpolates ten values into a
        template that never emits the SELECT keyword for the outer query
        (it relies on sql_s) — looks fragile; verify against a live SAP DB.
        """
        if limitby:
            (lmin, lmax) = limitby
            if len(sql_w) > 1:
                sql_w_row = sql_w + ' AND w_row > %i' % lmin
            else:
                sql_w_row = 'WHERE w_row > %i' % lmin
            return '%s %s FROM (SELECT w_tmp.*, ROWNO w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNO=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def create_sequence_and_triggers(self, query, table, **args):
        # following lines should only be executed if table._sequence_name does not exist
        self.execute('CREATE SEQUENCE %s;' % table._sequence_name)
        self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \
            % (table._tablename, table._id.name, table._sequence_name))
        self.execute(query)

    # sapdb://user:password@host/db[?sslmode=...]
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$')


    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Parse the sapdb:// URI; raises SyntaxError when malformed."""
        self.db = db
        self.dbengine = "sapdb"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError("Invalid URI string in DAL")
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        def connector(user=user, password=password, database=db,
                      host=host, driver_args=driver_args):
            return self.driver.Connection(user, password, database,
                                          host, **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def lastrowid(self,table):
        self.execute("select %s.NEXTVAL from dual" % table._sequence_name)
        return long(self.cursor.fetchone()[0])  # Python 2 long
4346
class CubridAdapter(MySQLAdapter):
    """CUBRID adapter: speaks the MySQL dialect over the cubriddb driver."""

    drivers = ('cubriddb',)

    # cubrid://user[:password]@host[:port]/db[?set_encoding=charset]
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$')

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Parse the cubrid:// URI and prepare a connector closure.

        Raises SyntaxError when the URI is malformed or user/host/db missing.
        """
        self.db = db
        self.dbengine = "cubrid"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError(
                "Invalid URI string in DAL: %s" % self.uri)
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        port = int(m.group('port') or '30000')  # CUBRID broker default port
        user = credential_decoder(user)
        # Fix: the original also computed `charset = m.group('charset') or
        # 'utf8'` and `passwd = credential_decoder(password)` but never used
        # either (the connector was bound to the un-re-decoded `password`);
        # the dead locals are removed, the connector behavior is unchanged.
        def connector(host=host,port=port,db=db,
                      user=user,passwd=password,driver_args=driver_args):
            return self.driver.connect(host,port,db,user,passwd,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        # Match the MySQL adapter: enforce FK checks and standard escaping
        self.execute('SET FOREIGN_KEY_CHECKS=1;')
        self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")
4394
######## GAE MySQL ##########

class DatabaseStoredFile:
    """File-like object that stores its content in a `web2py_filesystem`
    table instead of on disk (used where the filesystem is read-only,
    e.g. Google App Engine).  Only whole-file read/write is supported.

    NOTE(review): file paths are interpolated into SQL strings unescaped
    throughout this class — acceptable only because filenames come from
    web2py itself, not from users; verify before widening its use.
    """

    # class-level flag: the backing table is created at most once per process
    web2py_filesystem = False

    def escape(self,obj):
        return self.db._adapter.escape(obj)

    def __init__(self,db,filename,mode):
        """Open *filename* in the database; mode is 'r', 'w', 'a' or 'rw'."""
        if not db._adapter.dbengine in ('mysql', 'postgres', 'sqlite'):
            raise RuntimeError("only MySQL/Postgres/SQLite can store metadata .table files in database for now")
        self.db = db
        self.filename = filename
        self.mode = mode
        if not self.web2py_filesystem:
            if db._adapter.dbengine == 'mysql':
                sql = "CREATE TABLE IF NOT EXISTS web2py_filesystem (path VARCHAR(255), content LONGTEXT, PRIMARY KEY(path) ) ENGINE=InnoDB;"
            elif db._adapter.dbengine in ('postgres', 'sqlite'):
                sql = "CREATE TABLE IF NOT EXISTS web2py_filesystem (path VARCHAR(255), content TEXT, PRIMARY KEY(path));"
            self.db.executesql(sql)
            DatabaseStoredFile.web2py_filesystem = True
        self.p=0  # current read position within self.data
        self.data = ''
        if mode in ('r','rw','a'):
            query = "SELECT content FROM web2py_filesystem WHERE path='%s'" \
                % filename
            rows = self.db.executesql(query)
            if rows:
                self.data = rows[0][0]
            elif exists(filename):
                # fall back to a real file on disk with the same name
                datafile = open(filename, 'r')
                try:
                    self.data = datafile.read()
                finally:
                    datafile.close()
            elif mode in ('r','rw'):
                raise RuntimeError("File %s does not exist" % filename)

    def read(self, bytes):
        """Read up to *bytes* characters from the current position."""
        data = self.data[self.p:self.p+bytes]
        self.p += len(data)
        return data

    def readline(self):
        """Read one line (including the trailing newline, if any)."""
        i = self.data.find('\n',self.p)+1
        if i>0:
            data, self.p = self.data[self.p:i], i
        else:
            data, self.p = self.data[self.p:], len(self.data)
        return data

    def write(self,data):
        # buffered in memory; persisted by close_connection()
        self.data += data

    def close_connection(self):
        """Persist the buffer to the database (delete + insert) and commit."""
        if self.db is not None:
            self.db.executesql(
                "DELETE FROM web2py_filesystem WHERE path='%s'" % self.filename)
            query = "INSERT INTO web2py_filesystem(path,content) VALUES ('%s','%s')"\
                % (self.filename, self.data.replace("'","''"))
            self.db.executesql(query)
            self.db.commit()
            self.db = None  # guard against double close

    def close(self):
        self.close_connection()

    @staticmethod
    def exists(db, filename):
        """True when *filename* exists on disk or in web2py_filesystem."""
        if exists(filename):
            return True
        query = "SELECT path FROM web2py_filesystem WHERE path='%s'" % filename
        try:
            if db.executesql(query):
                return True
        except Exception, e:
            # only swallow the errors meaning "table missing"; re-raise others
            if not (db._adapter.isOperationalError(e) or
                    db._adapter.isProgrammingError(e)):
                raise
            # no web2py_filesystem found?
            tb = traceback.format_exc()
            LOGGER.error("Could not retrieve %s\n%s" % (filename, tb))
        return False
4480
class UseDatabaseStoredFile:
    """Mixin routing adapter .table metadata file I/O through
    DatabaseStoredFile (i.e. into the web2py_filesystem table)."""

    def file_exists(self, filename):
        return DatabaseStoredFile.exists(self.db,filename)

    def file_open(self, filename, mode='rb', lock=True):
        # `lock` is accepted for interface compatibility and ignored here
        return DatabaseStoredFile(self.db,filename,mode)

    def file_close(self, fileobj):
        fileobj.close_connection()

    def file_delete(self,filename):
        # NOTE(review): filename interpolated into SQL unescaped — safe only
        # for framework-generated names, as in DatabaseStoredFile.
        query = "DELETE FROM web2py_filesystem WHERE path='%s'" % filename
        self.db.executesql(query)
        self.db.commit()
4497
class GoogleSQLAdapter(UseDatabaseStoredFile,MySQLAdapter):
    """Google Cloud SQL: MySQL dialect via the App Engine `rdbms` API,
    with .table metadata stored in the database (UseDatabaseStoredFile)."""

    uploads_in_blob = True

    # google:sql://instance/database
    REGEX_URI = re.compile('^(?P<instance>.*)/(?P<db>.*)$')

    def __init__(self, db, uri='google:sql://realm:domain/database',
                 pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Parse the google:sql URI; raises SyntaxError when malformed."""

        self.db = db
        self.dbengine = "mysql"
        self.uri = uri
        self.pool_size = pool_size
        self.db_codec = db_codec
        self._after_connection = after_connection
        if do_connect: self.find_driver(adapter_args, uri)
        # GAE has no writable local folder; park metadata under $HOME
        self.folder = folder or pjoin('$HOME',THREAD_LOCAL.folder.split(
            os.sep+'applications'+os.sep,1)[1])
        ruri = uri.split("://")[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError("Invalid URI string in SQLDB: %s" % self.uri)
        instance = credential_decoder(m.group('instance'))
        self.dbstring = db = credential_decoder(m.group('db'))
        driver_args['instance'] = instance
        if not 'charset' in driver_args:
            driver_args['charset'] = 'utf8'
        # createdb: create the database on first connect (default True)
        self.createdb = createdb = adapter_args.get('createdb',True)
        if not createdb:
            driver_args['database'] = db
        def connector(driver_args=driver_args):
            return rdbms.connect(**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        if self.createdb:
            # self.execute('DROP DATABASE %s' % self.dbstring)
            self.execute('CREATE DATABASE IF NOT EXISTS %s' % self.dbstring)
            self.execute('USE %s' % self.dbstring)
        self.execute("SET FOREIGN_KEY_CHECKS=1;")
        self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")

    def execute(self, command, *a, **b):
        # rdbms expects unicode; commands are built as utf8 byte strings
        return self.log_execute(command.decode('utf8'), *a, **b)

    def find_driver(self,adapter_args,uri=None):
        # no importable DB-API driver: the rdbms module is used directly
        self.adapter_args = adapter_args
        self.driver = "google"
4548
class NoSQLAdapter(BaseAdapter):
    """Base class for non-relational adapters (GAE datastore, MongoDB, ...)."""

    # NoSQL backends cannot honor SELECT ... FOR UPDATE
    can_select_for_update = False

    @staticmethod
    def to_unicode(obj):
        # Python 2 semantics: decode byte strings as UTF-8, coerce any
        # other non-unicode value via unicode(), pass unicode through.
        if isinstance(obj, str):
            return obj.decode('utf8')
        elif not isinstance(obj, unicode):
            return unicode(obj)
        return obj
4559
def id_query(self, table):
    """Return a query that matches every record of *table* (id > 0)."""
    all_records = table._id > 0
    return all_records
4562
def represent(self, obj, fieldtype):
    """Convert *obj* to the backend's storage representation for *fieldtype*.

    Handles callables, SQLCustomType encoders, GAE Property passthrough,
    list: types, numeric/boolean coercion, date/time parsing from strings,
    and JSON decoding.  Returns None for an empty string on non-text types.
    Python 2 code: relies on long/unicode/basestring and list-returning map().
    """
    field_is_type = fieldtype.startswith
    if isinstance(obj, CALLABLETYPES):
        obj = obj()  # lazy defaults: call to obtain the actual value
    if isinstance(fieldtype, SQLCustomType):
        return fieldtype.encoder(obj)
    if isinstance(obj, (Expression, Field)):
        raise SyntaxError("non supported on GAE")
    if self.dbengine == 'google:datastore':
        if isinstance(fieldtype, gae.Property):
            return obj  # GAE property values are stored as-is
    is_string = isinstance(fieldtype,str)
    is_list = is_string and field_is_type('list:')
    if is_list:
        if not obj:
            obj = []
        if not isinstance(obj, (list, tuple)):
            obj = [obj]  # normalize scalars to one-element lists
    if obj == '' and not \
            (is_string and fieldtype[:2] in ['st','te', 'pa','up']):
        # empty string is only meaningful for string/text/password/upload
        return None
    if not obj is None:
        if isinstance(obj, list) and not is_list:
            obj = [self.represent(o, fieldtype) for o in obj]
        elif fieldtype in ('integer','bigint','id'):
            obj = long(obj)
        elif fieldtype == 'double':
            obj = float(obj)
        elif is_string and field_is_type('reference'):
            if isinstance(obj, (Row, Reference)):
                obj = obj['id']
            obj = long(obj)
        elif fieldtype == 'boolean':
            # truthy unless the string form starts with '0' or 'F'
            if obj and not str(obj)[0].upper() in '0F':
                obj = True
            else:
                obj = False
        elif fieldtype == 'date':
            if not isinstance(obj, datetime.date):
                (y, m, d) = map(int,str(obj).strip().split('-'))
                obj = datetime.date(y, m, d)
            elif isinstance(obj,datetime.datetime):
                (y, m, d) = (obj.year, obj.month, obj.day)
                obj = datetime.date(y, m, d)
        elif fieldtype == 'time':
            if not isinstance(obj, datetime.time):
                time_items = map(int,str(obj).strip().split(':')[:3])
                if len(time_items) == 3:
                    (h, mi, s) = time_items
                else:
                    (h, mi, s) = time_items + [0]
                obj = datetime.time(h, mi, s)
        elif fieldtype == 'datetime':
            if not isinstance(obj, datetime.datetime):
                (y, m, d) = map(int,str(obj)[:10].strip().split('-'))
                time_items = map(int,str(obj)[11:].strip().split(':')[:3])
                while len(time_items)<3:
                    time_items.append(0)  # pad missing minutes/seconds
                (h, mi, s) = time_items
                obj = datetime.datetime(y, m, d, h, mi, s)
        elif fieldtype == 'blob':
            pass  # stored verbatim
        elif fieldtype == 'json':
            if isinstance(obj, basestring):
                obj = self.to_unicode(obj)
                if have_serializers:
                    obj = serializers.loads_json(obj)
                elif simplejson:
                    obj = simplejson.loads(obj)
                else:
                    raise RuntimeError("missing simplejson")
        elif is_string and field_is_type('list:string'):
            return map(self.to_unicode,obj)
        elif is_list:
            return map(int,obj)
        else:
            obj = self.to_unicode(obj)
    return obj
4641
4642 - def _insert(self,table,fields):
4643 return 'insert %s in %s' % (fields, table)
4644
4645 - def _count(self,query,distinct=None):
4646 return 'count %s' % repr(query)
4647
4648 - def _select(self,query,fields,attributes):
4649 return 'select %s where %s' % (repr(fields), repr(query))
4650
4651 - def _delete(self,tablename, query):
4652 return 'delete %s where %s' % (repr(tablename),repr(query))
4653
4654 - def _update(self,tablename,query,fields):
4655 return 'update %s (%s) where %s' % (repr(tablename), 4656 repr(fields),repr(query))
4657
4658 - def commit(self):
4659 """ 4660 remember: no transactions on many NoSQL 4661 """ 4662 pass
4663
4664 - def rollback(self):
4665 """ 4666 remember: no transactions on many NoSQL 4667 """ 4668 pass
4669
4670 - def close_connection(self):
4671 """ 4672 remember: no transactions on many NoSQL 4673 """ 4674 pass
    # these functions should never be called!
    # Every SQL-only operation below fails fast with SyntaxError so that
    # misuse of a NoSQL adapter surfaces immediately at the call site
    # instead of silently producing wrong results.
    def OR(self,first,second): raise SyntaxError("Not supported")
    def AND(self,first,second): raise SyntaxError("Not supported")
    def AS(self,first,second): raise SyntaxError("Not supported")
    def ON(self,first,second): raise SyntaxError("Not supported")
    def STARTSWITH(self,first,second=None): raise SyntaxError("Not supported")
    def ENDSWITH(self,first,second=None): raise SyntaxError("Not supported")
    def ADD(self,first,second): raise SyntaxError("Not supported")
    def SUB(self,first,second): raise SyntaxError("Not supported")
    def MUL(self,first,second): raise SyntaxError("Not supported")
    def DIV(self,first,second): raise SyntaxError("Not supported")
    def LOWER(self,first): raise SyntaxError("Not supported")
    def UPPER(self,first): raise SyntaxError("Not supported")
    def EXTRACT(self,first,what): raise SyntaxError("Not supported")
    def LENGTH(self, first): raise SyntaxError("Not supported")
    def AGGREGATE(self,first,what): raise SyntaxError("Not supported")
    def LEFT_JOIN(self): raise SyntaxError("Not supported")
    def RANDOM(self): raise SyntaxError("Not supported")
    def SUBSTRING(self,field,parameters): raise SyntaxError("Not supported")
    def PRIMARY_KEY(self,key): raise SyntaxError("Not supported")
    def ILIKE(self,first,second): raise SyntaxError("Not supported")
    def drop(self,table,mode): raise SyntaxError("Not supported")
    def alias(self,table,alias): raise SyntaxError("Not supported")
    def migrate_table(self,*a,**b): raise SyntaxError("Not supported")
    def distributed_transaction_begin(self,key): raise SyntaxError("Not supported")
    def prepare(self,key): raise SyntaxError("Not supported")
    def commit_prepared(self,key): raise SyntaxError("Not supported")
    def rollback_prepared(self,key): raise SyntaxError("Not supported")
    def concat_add(self,table): raise SyntaxError("Not supported")
    def constraint_name(self, table, fieldname): raise SyntaxError("Not supported")
    # deliberately a no-op (not an error): sequences/triggers are a SQL
    # concept with no NoSQL analogue, and callers invoke this routinely
    def create_sequence_and_triggers(self, query, table, **args): pass
    def log_execute(self,*a,**b): raise SyntaxError("Not supported")
    def execute(self,*a,**b): raise SyntaxError("Not supported")
    def represent_exceptions(self, obj, fieldtype): raise SyntaxError("Not supported")
    def lastrowid(self,table): raise SyntaxError("Not supported")
    def rowslice(self,rows,minimum=0,maximum=None): raise SyntaxError("Not supported")
4713
class GAEF(object):
    """A single primitive Datastore filter.

    Holds the property name, the comparison operator, the comparison
    value, and a Python callable (``apply``) that re-applies the same
    test in memory on records that were already fetched.
    """
    def __init__(self,name,op,value,apply):
        # queries on the primary key use the Datastore '__key__' pseudo-property
        if name == 'id':
            self.name = '__key__'
        else:
            self.name = name
        self.op = op
        self.value = value
        self.apply = apply

    def __repr__(self):
        # includes the value's type to ease debugging of filter mismatches
        return '(%s %s %s:%s)' % (self.name, self.op, repr(self.value),
                                  type(self.value))
4723
class GoogleDatastoreAdapter(NoSQLAdapter):
    """
    Adapter for the Google App Engine Datastore (classic ``db`` API or NDB).

    NDB:

    You can enable NDB by using adapter_args:

    db = DAL('google:datastore', adapter_args={'ndb_settings':ndb_settings, 'use_ndb':True})

    ndb_settings is optional and can be used for per model caching settings.
    It must be a dict in this form:
    ndb_settings = {<table_name>:{<variable_name>:<variable_value>}}
    See: https://developers.google.com/appengine/docs/python/ndb/cache
    """

    uploads_in_blob = True
    # populated in __init__ with either gae.* or ndb.* property factories,
    # depending on the use_ndb adapter argument
    types = {}

    # file operations are meaningless on GAE (no writable filesystem)
    def file_exists(self, filename): pass
    def file_open(self, filename, mode='rb', lock=True): pass
    def file_close(self, fileobj): pass

    # extracts an optional datastore namespace from the connection uri
    REGEX_NAMESPACE = re.compile('.*://(?P<namespace>.+)')
    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Initialize the adapter and select the gae-vs-ndb property map.

        ``adapter_args['use_ndb']`` switches the whole adapter to the NDB
        API; ``adapter_args['ndb_settings']`` optionally carries per-model
        NDB cache settings (see the class docstring).
        """
        self.use_ndb = ('use_ndb' in adapter_args) and adapter_args['use_ndb']
        if self.use_ndb is True:
            self.types.update({
                'boolean': ndb.BooleanProperty,
                'string': (lambda **kwargs: ndb.StringProperty(**kwargs)),
                'text': ndb.TextProperty,
                'json': ndb.TextProperty,
                'password': ndb.StringProperty,
                'blob': ndb.BlobProperty,
                'upload': ndb.StringProperty,
                'integer': ndb.IntegerProperty,
                'bigint': ndb.IntegerProperty,
                'float': ndb.FloatProperty,
                'double': ndb.FloatProperty,
                'decimal': NDBDecimalProperty,
                'date': ndb.DateProperty,
                'time': ndb.TimeProperty,
                'datetime': ndb.DateTimeProperty,
                'id': None,
                'reference': ndb.IntegerProperty,
                'list:string': (lambda **kwargs: ndb.StringProperty(repeated=True,default=None, **kwargs)),
                'list:integer': (lambda **kwargs: ndb.IntegerProperty(repeated=True,default=None, **kwargs)),
                'list:reference': (lambda **kwargs: ndb.IntegerProperty(repeated=True,default=None, **kwargs)),
                })
        else:
            self.types.update({
                'boolean': gae.BooleanProperty,
                'string': (lambda **kwargs: gae.StringProperty(multiline=True, **kwargs)),
                'text': gae.TextProperty,
                'json': gae.TextProperty,
                'password': gae.StringProperty,
                'blob': gae.BlobProperty,
                'upload': gae.StringProperty,
                'integer': gae.IntegerProperty,
                'bigint': gae.IntegerProperty,
                'float': gae.FloatProperty,
                'double': gae.FloatProperty,
                'decimal': GAEDecimalProperty,
                'date': gae.DateProperty,
                'time': gae.TimeProperty,
                'datetime': gae.DateTimeProperty,
                'id': None,
                'reference': gae.IntegerProperty,
                'list:string': (lambda **kwargs: gae.StringListProperty(default=None, **kwargs)),
                'list:integer': (lambda **kwargs: gae.ListProperty(int,default=None, **kwargs)),
                'list:reference': (lambda **kwargs: gae.ListProperty(int,default=None, **kwargs)),
                })
        self.db = db
        self.uri = uri
        self.dbengine = 'google:datastore'
        self.folder = folder
        db['_lastsql'] = ''
        self.db_codec = 'UTF-8'
        self._after_connection = after_connection
        # no connection pooling on the datastore
        self.pool_size = 0
        match = self.REGEX_NAMESPACE.match(uri)
        if match:
            namespace_manager.set_namespace(match.group('namespace'))
        # factory used to build datastore keys in either API flavor
        self.keyfunc = (self.use_ndb and ndb.Key) or Key.from_path

        self.ndb_settings = None
        if 'ndb_settings' in adapter_args:
            self.ndb_settings = adapter_args['ndb_settings']
4813
4814 - def parse_id(self, value, field_type):
4815 return value
4816
4817 - def create_table(self,table,migrate=True,fake_migrate=False, polymodel=None):
4818 myfields = {} 4819 for field in table: 4820 if isinstance(polymodel,Table) and field.name in polymodel.fields(): 4821 continue 4822 attr = {} 4823 if isinstance(field.custom_qualifier, dict): 4824 #this is custom properties to add to the GAE field declartion 4825 attr = field.custom_qualifier 4826 field_type = field.type 4827 if isinstance(field_type, SQLCustomType): 4828 ftype = self.types[field_type.native or field_type.type](**attr) 4829 elif isinstance(field_type, ((self.use_ndb and ndb.Property) or gae.Property)): 4830 ftype = field_type 4831 elif field_type.startswith('id'): 4832 continue 4833 elif field_type.startswith('decimal'): 4834 precision, scale = field_type[7:].strip('()').split(',') 4835 precision = int(precision) 4836 scale = int(scale) 4837 dec_cls = (self.use_ndb and NDBDecimalProperty) or GAEDecimalProperty 4838 ftype = dec_cls(precision, scale, **attr) 4839 elif field_type.startswith('reference'): 4840 if field.notnull: 4841 attr = dict(required=True) 4842 referenced = field_type[10:].strip() 4843 ftype = self.types[field_type[:9]](referenced, **attr) 4844 elif field_type.startswith('list:reference'): 4845 if field.notnull: 4846 attr['required'] = True 4847 referenced = field_type[15:].strip() 4848 ftype = self.types[field_type[:14]](**attr) 4849 elif field_type.startswith('list:'): 4850 ftype = self.types[field_type](**attr) 4851 elif not field_type in self.types\ 4852 or not self.types[field_type]: 4853 raise SyntaxError('Field: unknown field type: %s' % field_type) 4854 else: 4855 ftype = self.types[field_type](**attr) 4856 myfields[field.name] = ftype 4857 if not polymodel: 4858 model_cls = (self.use_ndb and ndb.Model) or gae.Model 4859 table._tableobj = classobj(table._tablename, (model_cls, ), myfields) 4860 if self.use_ndb: 4861 # Set NDB caching variables 4862 if self.ndb_settings and (table._tablename in self.ndb_settings): 4863 for k, v in self.ndb_settings.iteritems(): 4864 setattr(table._tableobj, k, v) 4865 elif 
polymodel==True: 4866 pm_cls = (self.use_ndb and NDBPolyModel) or PolyModel 4867 table._tableobj = classobj(table._tablename, (pm_cls, ), myfields) 4868 elif isinstance(polymodel,Table): 4869 table._tableobj = classobj(table._tablename, (polymodel._tableobj, ), myfields) 4870 else: 4871 raise SyntaxError("polymodel must be None, True, a table or a tablename") 4872 return None
4873
4874 - def expand(self,expression,field_type=None):
4875 if isinstance(expression,Field): 4876 if expression.type in ('text', 'blob', 'json'): 4877 raise SyntaxError('AppEngine does not index by: %s' % expression.type) 4878 return expression.name 4879 elif isinstance(expression, (Expression, Query)): 4880 if not expression.second is None: 4881 return expression.op(expression.first, expression.second) 4882 elif not expression.first is None: 4883 return expression.op(expression.first) 4884 else: 4885 return expression.op() 4886 elif field_type: 4887 return self.represent(expression,field_type) 4888 elif isinstance(expression,(list,tuple)): 4889 return ','.join([self.represent(item,field_type) for item in expression]) 4890 else: 4891 return str(expression)
4892 4893 ### TODO from gql.py Expression
4894 - def AND(self,first,second):
4895 a = self.expand(first) 4896 b = self.expand(second) 4897 if b[0].name=='__key__' and a[0].name!='__key__': 4898 return b+a 4899 return a+b
4900
4901 - def EQ(self,first,second=None):
4902 if isinstance(second, Key): 4903 return [GAEF(first.name,'=',second,lambda a,b:a==b)] 4904 return [GAEF(first.name,'=',self.represent(second,first.type),lambda a,b:a==b)]
4905
4906 - def NE(self,first,second=None):
4907 if first.type != 'id': 4908 return [GAEF(first.name,'!=',self.represent(second,first.type),lambda a,b:a!=b)] 4909 else: 4910 if not second is None: 4911 second = Key.from_path(first._tablename, long(second)) 4912 return [GAEF(first.name,'!=',second,lambda a,b:a!=b)]
4913
4914 - def LT(self,first,second=None):
4915 if first.type != 'id': 4916 return [GAEF(first.name,'<',self.represent(second,first.type),lambda a,b:a<b)] 4917 else: 4918 second = Key.from_path(first._tablename, long(second)) 4919 return [GAEF(first.name,'<',second,lambda a,b:a<b)]
4920
4921 - def LE(self,first,second=None):
4922 if first.type != 'id': 4923 return [GAEF(first.name,'<=',self.represent(second,first.type),lambda a,b:a<=b)] 4924 else: 4925 second = Key.from_path(first._tablename, long(second)) 4926 return [GAEF(first.name,'<=',second,lambda a,b:a<=b)]
4927
4928 - def GT(self,first,second=None):
4929 if first.type != 'id' or second==0 or second == '0': 4930 return [GAEF(first.name,'>',self.represent(second,first.type),lambda a,b:a>b)] 4931 else: 4932 second = Key.from_path(first._tablename, long(second)) 4933 return [GAEF(first.name,'>',second,lambda a,b:a>b)]
4934
4935 - def GE(self,first,second=None):
4936 if first.type != 'id': 4937 return [GAEF(first.name,'>=',self.represent(second,first.type),lambda a,b:a>=b)] 4938 else: 4939 second = Key.from_path(first._tablename, long(second)) 4940 return [GAEF(first.name,'>=',second,lambda a,b:a>=b)]
4941
4942 - def INVERT(self,first):
4943 return '-%s' % first.name
4944
4945 - def COMMA(self,first,second):
4946 return '%s, %s' % (self.expand(first),self.expand(second))
4947
4948 - def BELONGS(self,first,second=None):
4949 if not isinstance(second,(list, tuple, set)): 4950 raise SyntaxError("Not supported") 4951 if not self.use_ndb: 4952 if isinstance(second,set): 4953 second = list(second) 4954 if first.type == 'id': 4955 second = [Key.from_path(first._tablename, int(i)) for i in second] 4956 return [GAEF(first.name,'in',second,lambda a,b:a in b)]
4957
4958 - def CONTAINS(self,first,second,case_sensitive=False):
4959 # silently ignoring: GAE can only do case sensitive matches! 4960 if not first.type.startswith('list:'): 4961 raise SyntaxError("Not supported") 4962 return [GAEF(first.name,'=',self.expand(second,first.type[5:]),lambda a,b:b in a)]
4963
4964 - def NOT(self,first):
4965 nops = { self.EQ: self.NE, 4966 self.NE: self.EQ, 4967 self.LT: self.GE, 4968 self.GT: self.LE, 4969 self.LE: self.GT, 4970 self.GE: self.LT} 4971 if not isinstance(first,Query): 4972 raise SyntaxError("Not suported") 4973 nop = nops.get(first.op,None) 4974 if not nop: 4975 raise SyntaxError("Not suported %s" % first.op.__name__) 4976 first.op = nop 4977 return self.expand(first)
4978
4979 - def truncate(self,table,mode):
4980 self.db(self.db._adapter.id_query(table)).delete()
    # Dispatch table mapping an operator symbol to a callable that applies
    # the corresponding NDB filter to a query object (q=query, t=model
    # class, p=property name, v=value).  Used by filter() below.
    GAE_FILTER_OPTIONS = {
        '=': lambda q, t, p, v: q.filter(getattr(t,p) == v),
        '>': lambda q, t, p, v: q.filter(getattr(t,p) > v),
        '<': lambda q, t, p, v: q.filter(getattr(t,p) < v),
        '<=': lambda q, t, p, v: q.filter(getattr(t,p) <= v),
        '>=': lambda q, t, p, v: q.filter(getattr(t,p) >= v),
        '!=': lambda q, t, p, v: q.filter(getattr(t,p) != v),
        'in': lambda q, t, p, v: q.filter(getattr(t,p).IN(v)),
        }
4992 - def filter(self, query, tableobj, prop, op, value):
4993 return self.GAE_FILTER_OPTIONS[op](query, tableobj, prop, value)
4994
4995 - def select_raw(self,query,fields=None,attributes=None):
4996 db = self.db 4997 fields = fields or [] 4998 attributes = attributes or {} 4999 args_get = attributes.get 5000 new_fields = [] 5001 for item in fields: 5002 if isinstance(item,SQLALL): 5003 new_fields += item._table 5004 else: 5005 new_fields.append(item) 5006 fields = new_fields 5007 if query: 5008 tablename = self.get_table(query) 5009 elif fields: 5010 tablename = fields[0].tablename 5011 query = db._adapter.id_query(fields[0].table) 5012 else: 5013 raise SyntaxError("Unable to determine a tablename") 5014 5015 if query: 5016 if use_common_filters(query): 5017 query = self.common_filter(query,[tablename]) 5018 5019 #tableobj is a GAE/NDB Model class (or subclass) 5020 tableobj = db[tablename]._tableobj 5021 filters = self.expand(query) 5022 5023 projection = None 5024 if len(db[tablename].fields) == len(fields): 5025 #getting all fields, not a projection query 5026 projection = None 5027 elif args_get('projection') == True: 5028 projection = [] 5029 for f in fields: 5030 if f.type in ['text', 'blob', 'json']: 5031 raise SyntaxError( 5032 "text and blob field types not allowed in projection queries") 5033 else: 5034 projection.append(f.name) 5035 elif args_get('filterfields') == True: 5036 projection = [] 5037 for f in fields: 5038 projection.append(f.name) 5039 5040 # real projection's can't include 'id'. 
5041 # it will be added to the result later 5042 query_projection = [ 5043 p for p in projection if \ 5044 p != db[tablename]._id.name] if projection and \ 5045 args_get('projection') == True\ 5046 else None 5047 5048 cursor = None 5049 if isinstance(args_get('reusecursor'), str): 5050 cursor = args_get('reusecursor') 5051 if self.use_ndb: 5052 qo = ndb.QueryOptions(projection=query_projection, cursor=cursor) 5053 items = tableobj.query(default_options=qo) 5054 else: 5055 items = gae.Query(tableobj, projection=query_projection, 5056 cursor=cursor) 5057 5058 for filter in filters: 5059 if args_get('projection') == True and \ 5060 filter.name in query_projection and \ 5061 filter.op in ['=', '<=', '>=']: 5062 raise SyntaxError( 5063 "projection fields cannot have equality filters") 5064 if filter.name=='__key__' and filter.op=='>' and filter.value==0: 5065 continue 5066 elif filter.name=='__key__' and filter.op=='=': 5067 if filter.value==0: 5068 items = [] 5069 elif isinstance(filter.value, (self.use_ndb and ndb.Key) or Key): 5070 # key qeuries return a class instance, 5071 # can't use projection 5072 # extra values will be ignored in post-processing later 5073 item = filter.value.get() if self.use_ndb else tableobj.get(filter.value) 5074 items = (item and [item]) or [] 5075 else: 5076 # key qeuries return a class instance, 5077 # can't use projection 5078 # extra values will be ignored in post-processing later 5079 item = tableobj.get_by_id(filter.value) 5080 items = (item and [item]) or [] 5081 elif isinstance(items,list): # i.e. there is a single record! 
5082 items = [i for i in items if filter.apply( 5083 getattr(item,filter.name),filter.value)] 5084 else: 5085 if filter.name=='__key__' and filter.op != 'in': 5086 if self.use_ndb: 5087 items.order(tableobj._key) 5088 else: 5089 items.order('__key__') 5090 items = self.filter(items, tableobj, filter.name, 5091 filter.op, filter.value) \ 5092 if self.use_ndb else \ 5093 items.filter('%s %s' % (filter.name,filter.op), 5094 filter.value) 5095 5096 if not isinstance(items,list): 5097 if args_get('left', None): 5098 raise SyntaxError('Set: no left join in appengine') 5099 if args_get('groupby', None): 5100 raise SyntaxError('Set: no groupby in appengine') 5101 orderby = args_get('orderby', False) 5102 if orderby: 5103 ### THIS REALLY NEEDS IMPROVEMENT !!! 5104 if isinstance(orderby, (list, tuple)): 5105 orderby = xorify(orderby) 5106 if isinstance(orderby,Expression): 5107 orderby = self.expand(orderby) 5108 orders = orderby.split(', ') 5109 for order in orders: 5110 if self.use_ndb: 5111 #TODO There must be a better way 5112 def make_order(o): 5113 s = str(o) 5114 desc = s[0] == '-' 5115 s = (desc and s[1:]) or s 5116 return (desc and -getattr(tableobj, s)) or getattr(tableobj, s)
5117 _order = {'-id':-tableobj._key,'id':tableobj._key}.get(order) 5118 if _order is None: 5119 _order = make_order(order) 5120 items = items.order(_order) 5121 else: 5122 order={'-id':'-__key__','id':'__key__'}.get(order,order) 5123 items = items.order(order) 5124 if args_get('limitby', None): 5125 (lmin, lmax) = attributes['limitby'] 5126 (limit, offset) = (lmax - lmin, lmin) 5127 if self.use_ndb: 5128 rows, cursor, more = items.fetch_page(limit,offset=offset) 5129 else: 5130 rows = items.fetch(limit,offset=offset) 5131 #cursor is only useful if there was a limit and we didn't return 5132 # all results 5133 if args_get('reusecursor'): 5134 db['_lastcursor'] = cursor if self.use_ndb else items.cursor() 5135 items = rows 5136 return (items, tablename, projection or db[tablename].fields)
5137
5138 - def select(self,query,fields,attributes):
5139 """ 5140 This is the GAE version of select. some notes to consider: 5141 - db['_lastsql'] is not set because there is not SQL statement string 5142 for a GAE query 5143 - 'nativeRef' is a magical fieldname used for self references on GAE 5144 - optional attribute 'projection' when set to True will trigger 5145 use of the GAE projection queries. note that there are rules for 5146 what is accepted imposed by GAE: each field must be indexed, 5147 projection queries cannot contain blob or text fields, and you 5148 cannot use == and also select that same field. see https://developers.google.com/appengine/docs/python/datastore/queries#Query_Projection 5149 - optional attribute 'filterfields' when set to True web2py will only 5150 parse the explicitly listed fields into the Rows object, even though 5151 all fields are returned in the query. This can be used to reduce 5152 memory usage in cases where true projection queries are not 5153 usable. 5154 - optional attribute 'reusecursor' allows use of cursor with queries 5155 that have the limitby attribute. Set the attribute to True for the 5156 first query, set it to the value of db['_lastcursor'] to continue 5157 a previous query. The user must save the cursor value between 5158 requests, and the filters must be identical. It is up to the user 5159 to follow google's limitations: https://developers.google.com/appengine/docs/python/datastore/queries#Query_Cursors 5160 """ 5161 5162 (items, tablename, fields) = self.select_raw(query,fields,attributes) 5163 # self.db['_lastsql'] = self._select(query,fields,attributes) 5164 rows = [[(t==self.db[tablename]._id.name and item) or \ 5165 (t=='nativeRef' and item) or getattr(item, t) \ 5166 for t in fields] for item in items] 5167 colnames = ['%s.%s' % (tablename, t) for t in fields] 5168 processor = attributes.get('processor',self.parse) 5169 return processor(rows,fields,colnames,False)
5170
5171 - def count(self,query,distinct=None,limit=None):
5172 if distinct: 5173 raise RuntimeError("COUNT DISTINCT not supported") 5174 (items, tablename, fields) = self.select_raw(query) 5175 # self.db['_lastsql'] = self._count(query) 5176 try: 5177 return len(items) 5178 except TypeError: 5179 return items.count(limit=limit)
5180
5181 - def delete(self,tablename, query):
5182 """ 5183 This function was changed on 2010-05-04 because according to 5184 http://code.google.com/p/googleappengine/issues/detail?id=3119 5185 GAE no longer supports deleting more than 1000 records. 5186 """ 5187 # self.db['_lastsql'] = self._delete(tablename,query) 5188 (items, tablename, fields) = self.select_raw(query) 5189 # items can be one item or a query 5190 if not isinstance(items,list): 5191 #use a keys_only query to ensure that this runs as a datastore 5192 # small operations 5193 leftitems = items.fetch(1000, keys_only=True) 5194 counter = 0 5195 while len(leftitems): 5196 counter += len(leftitems) 5197 if self.use_ndb: 5198 ndb.delete_multi(leftitems) 5199 else: 5200 gae.delete(leftitems) 5201 leftitems = items.fetch(1000, keys_only=True) 5202 else: 5203 counter = len(items) 5204 if self.use_ndb: 5205 ndb.delete_multi([item.key for item in items]) 5206 else: 5207 gae.delete(items) 5208 return counter
5209
5210 - def update(self,tablename,query,update_fields):
5211 # self.db['_lastsql'] = self._update(tablename,query,update_fields) 5212 (items, tablename, fields) = self.select_raw(query) 5213 counter = 0 5214 for item in items: 5215 for field, value in update_fields: 5216 setattr(item, field.name, self.represent(value,field.type)) 5217 item.put() 5218 counter += 1 5219 LOGGER.info(str(counter)) 5220 return counter
5221
5222 - def insert(self,table,fields):
5223 dfields=dict((f.name,self.represent(v,f.type)) for f,v in fields) 5224 # table._db['_lastsql'] = self._insert(table,fields) 5225 tmp = table._tableobj(**dfields) 5226 tmp.put() 5227 key = tmp.key if self.use_ndb else tmp.key() 5228 rid = Reference(key.id()) 5229 (rid._table, rid._record, rid._gaekey) = (table, None, key) 5230 return rid
5231
5232 - def bulk_insert(self,table,items):
5233 parsed_items = [] 5234 for item in items: 5235 dfields=dict((f.name,self.represent(v,f.type)) for f,v in item) 5236 parsed_items.append(table._tableobj(**dfields)) 5237 if self.use_ndb: 5238 ndb.put_multi(parsed_items) 5239 else: 5240 gae.put(parsed_items) 5241 return True
5242
def uuid2int(uuidv):
    """Return the 128-bit integer encoded by the UUID string ``uuidv``."""
    parsed = uuid.UUID(uuidv)
    return parsed.int
5245
def int2uuid(n):
    """Return the canonical UUID string for the 128-bit integer ``n``
    (inverse of uuid2int)."""
    value = uuid.UUID(int=n)
    return str(value)
5248
class CouchDBAdapter(NoSQLAdapter):
    """DAL adapter for CouchDB (documents queried via javascript views)."""
    drivers = ('couchdb',)

    uploads_in_blob = True
    # DAL field type -> Python type used when parsing stored values
    types = {
        'boolean': bool,
        'string': str,
        'text': str,
        'json': str,
        'password': str,
        'blob': str,
        'upload': str,
        'integer': long,
        'bigint': long,
        'float': float,
        'double': float,
        'date': datetime.date,
        'time': datetime.time,
        'datetime': datetime.datetime,
        'id': long,
        'reference': long,
        'list:string': list,
        'list:integer': list,
        'list:reference': list,
        }

    # file operations are not applicable to this backend
    def file_exists(self, filename): pass
    def file_open(self, filename, mode='rb', lock=True): pass
    def file_close(self, fileobj): pass
5278
5279 - def expand(self,expression,field_type=None):
5280 if isinstance(expression,Field): 5281 if expression.type=='id': 5282 return "%s._id" % expression.tablename 5283 return BaseAdapter.expand(self,expression,field_type)
5284
5285 - def AND(self,first,second):
5286 return '(%s && %s)' % (self.expand(first),self.expand(second))
5287
5288 - def OR(self,first,second):
5289 return '(%s || %s)' % (self.expand(first),self.expand(second))
5290
5291 - def EQ(self,first,second):
5292 if second is None: 5293 return '(%s == null)' % self.expand(first) 5294 return '(%s == %s)' % (self.expand(first),self.expand(second,first.type))
5295
5296 - def NE(self,first,second):
5297 if second is None: 5298 return '(%s != null)' % self.expand(first) 5299 return '(%s != %s)' % (self.expand(first),self.expand(second,first.type))
5300
5301 - def COMMA(self,first,second):
5302 return '%s + %s' % (self.expand(first),self.expand(second))
5303
5304 - def represent(self, obj, fieldtype):
5305 value = NoSQLAdapter.represent(self, obj, fieldtype) 5306 if fieldtype=='id': 5307 return repr(str(long(value))) 5308 elif fieldtype in ('date','time','datetime','boolean'): 5309 return serializers.json(value) 5310 return repr(not isinstance(value,unicode) and value \ 5311 or value and value.encode('utf8'))
5312
    def __init__(self,db,uri='couchdb://127.0.0.1:5984',
                 pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Connect to a CouchDB server; uri is couchdb://host:port."""
        self.db = db
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        self.dbengine = 'couchdb'
        self.folder = folder
        db['_lastsql'] = ''
        self.db_codec = 'UTF-8'
        self._after_connection = after_connection
        self.pool_size = pool_size

        # strip the 'couchdb://' scheme and talk plain http to the server
        url='http://'+uri[10:]
        def connector(url=url,driver_args=driver_args):
            return self.driver.Server(url,**driver_args)
        self.reconnect(connector,cursor=False)
5331
5332 - def create_table(self, table, migrate=True, fake_migrate=False, polymodel=None):
5333 if migrate: 5334 try: 5335 self.connection.create(table._tablename) 5336 except: 5337 pass
5338
5339 - def insert(self,table,fields):
5340 id = uuid2int(web2py_uuid()) 5341 ctable = self.connection[table._tablename] 5342 values = dict((k.name,self.represent(v,k.type)) for k,v in fields) 5343 values['_id'] = str(id) 5344 ctable.save(values) 5345 return id
5346
5347 - def _select(self,query,fields,attributes):
5348 if not isinstance(query,Query): 5349 raise SyntaxError("Not Supported") 5350 for key in set(attributes.keys())-SELECT_ARGS: 5351 raise SyntaxError('invalid select attribute: %s' % key) 5352 new_fields=[] 5353 for item in fields: 5354 if isinstance(item,SQLALL): 5355 new_fields += item._table 5356 else: 5357 new_fields.append(item) 5358 def uid(fd): 5359 return fd=='id' and '_id' or fd
5360 def get(row,fd): 5361 return fd=='id' and long(row['_id']) or row.get(fd,None) 5362 fields = new_fields 5363 tablename = self.get_table(query) 5364 fieldnames = [f.name for f in (fields or self.db[tablename])] 5365 colnames = ['%s.%s' % (tablename,k) for k in fieldnames] 5366 fields = ','.join(['%s.%s' % (tablename,uid(f)) for f in fieldnames]) 5367 fn="(function(%(t)s){if(%(query)s)emit(%(order)s,[%(fields)s]);})" %\ 5368 dict(t=tablename, 5369 query=self.expand(query), 5370 order='%s._id' % tablename, 5371 fields=fields) 5372 return fn, colnames 5373
5374 - def select(self,query,fields,attributes):
5375 if not isinstance(query,Query): 5376 raise SyntaxError("Not Supported") 5377 fn, colnames = self._select(query,fields,attributes) 5378 tablename = colnames[0].split('.')[0] 5379 ctable = self.connection[tablename] 5380 rows = [cols['value'] for cols in ctable.query(fn)] 5381 processor = attributes.get('processor',self.parse) 5382 return processor(rows,fields,colnames,False)
5383
5384 - def delete(self,tablename,query):
5385 if not isinstance(query,Query): 5386 raise SyntaxError("Not Supported") 5387 if query.first.type=='id' and query.op==self.EQ: 5388 id = query.second 5389 tablename = query.first.tablename 5390 assert(tablename == query.first.tablename) 5391 ctable = self.connection[tablename] 5392 try: 5393 del ctable[str(id)] 5394 return 1 5395 except couchdb.http.ResourceNotFound: 5396 return 0 5397 else: 5398 tablename = self.get_table(query) 5399 rows = self.select(query,[self.db[tablename]._id],{}) 5400 ctable = self.connection[tablename] 5401 for row in rows: 5402 del ctable[str(row.id)] 5403 return len(rows)
5404
5405 - def update(self,tablename,query,fields):
5406 if not isinstance(query,Query): 5407 raise SyntaxError("Not Supported") 5408 if query.first.type=='id' and query.op==self.EQ: 5409 id = query.second 5410 tablename = query.first.tablename 5411 ctable = self.connection[tablename] 5412 try: 5413 doc = ctable[str(id)] 5414 for key,value in fields: 5415 doc[key.name] = self.represent(value,self.db[tablename][key.name].type) 5416 ctable.save(doc) 5417 return 1 5418 except couchdb.http.ResourceNotFound: 5419 return 0 5420 else: 5421 tablename = self.get_table(query) 5422 rows = self.select(query,[self.db[tablename]._id],{}) 5423 ctable = self.connection[tablename] 5424 table = self.db[tablename] 5425 for row in rows: 5426 doc = ctable[str(row.id)] 5427 for key,value in fields: 5428 doc[key.name] = self.represent(value,table[key.name].type) 5429 ctable.save(doc) 5430 return len(rows)
5431
5432 - def count(self,query,distinct=None):
5433 if distinct: 5434 raise RuntimeError("COUNT DISTINCT not supported") 5435 if not isinstance(query,Query): 5436 raise SyntaxError("Not Supported") 5437 tablename = self.get_table(query) 5438 rows = self.select(query,[self.db[tablename]._id],{}) 5439 return len(rows)
5440
5441 -def cleanup(text):
5442 """ 5443 validates that the given text is clean: only contains [0-9a-zA-Z_] 5444 """ 5445 if not REGEX_ALPHANUMERIC.match(text): 5446 raise SyntaxError('invalid table or field name: %s' % text) 5447 return text
5448
class MongoDBAdapter(NoSQLAdapter):
    """DAL adapter for MongoDB via pymongo."""
    native_json = True
    drivers = ('pymongo',)

    uploads_in_blob = False

    # DAL field type -> Python type used when parsing stored values
    types = {
        'boolean': bool,
        'string': str,
        'text': str,
        'json': str,
        'password': str,
        'blob': str,
        'upload': str,
        'integer': long,
        'bigint': long,
        'float': float,
        'double': float,
        'date': datetime.date,
        'time': datetime.time,
        'datetime': datetime.datetime,
        'id': long,
        'reference': long,
        'list:string': list,
        'list:integer': list,
        'list:reference': list,
        }

    error_messages = {"javascript_needed": "This must yet be replaced" +
                      " with javascript in order to work."}
    def __init__(self,db,uri='mongodb://127.0.0.1:5984/db',
                 pool_size=0, folder=None, db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Parse the mongodb:// uri and connect to the named database.

        Recognized adapter_args: 'minimumreplication', 'safe' (write
        concern, default True), 'uploads_in_blob'.
        """
        self.db = db
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        import random
        from bson.objectid import ObjectId
        from bson.son import SON
        import pymongo.uri_parser

        m = pymongo.uri_parser.parse_uri(uri)

        self.SON = SON
        self.ObjectId = ObjectId
        self.random = random

        self.dbengine = 'mongodb'
        self.folder = folder
        db['_lastsql'] = ''
        self.db_codec = 'UTF-8'
        self._after_connection = after_connection
        self.pool_size = pool_size
        #this is the minimum amount of replicates that it should wait
        # for on insert/update
        self.minimumreplication = adapter_args.get('minimumreplication',0)
        # by default all inserts and selects are performed asynchronous,
        # but now the default is
        # synchronous, except when overruled by either this default or
        # function parameter
        self.safe = adapter_args.get('safe',True)
        # load user setting for uploads in blob storage
        self.uploads_in_blob = adapter_args.get('uploads_in_blob', False)

        if isinstance(m,tuple):
            m = {"database" : m[1]}
        if m.get('database')==None:
            raise SyntaxError("Database is required!")

        def connector(uri=self.uri,m=m):
            # Connection() is deprecated
            if hasattr(self.driver, "MongoClient"):
                Connection = self.driver.MongoClient
            else:
                Connection = self.driver.Connection
            return Connection(uri)[m.get('database')]

        self.reconnect(connector,cursor=False)
5530
    def object_id(self, arg=None):
        """ Convert input to a valid Mongodb ObjectId instance

        self.object_id("<random>") -> ObjectId (not unique) instance

        Accepts an existing ObjectId, an int/long, or a string holding
        a decimal integer, a 24-digit raw hex value, a "0x"-prefixed
        hex value, or the literal "<random>".  Raises ValueError or
        TypeError for anything else.
        """
        # a falsy argument (None, 0, "") maps to the all-zero ObjectId
        if not arg:
            arg = 0
        if isinstance(arg, basestring):
            # we assume an integer as default input
            rawhex = len(arg.replace("0x", "").replace("L", "")) == 24
            if arg.isdigit() and (not rawhex):
                arg = int(arg)
            elif arg == "<random>":
                # build a pseudo-random 24-digit hex value
                # (not guaranteed unique; for testing only)
                arg = int("0x%sL" % \
                    "".join([self.random.choice("0123456789abcdef") \
                    for x in range(24)]), 0)
            elif arg.isalnum():
                # treat as hex, adding the 0x prefix when missing
                if not arg.startswith("0x"):
                    arg = "0x%s" % arg
                try:
                    arg = int(arg, 0)
                except ValueError, e:
                    raise ValueError(
                            "invalid objectid argument string: %s" % e)
            else:
                raise ValueError("Invalid objectid argument string. " +
                                 "Requires an integer or base 16 value")
        elif isinstance(arg, self.ObjectId):
            return arg

        if not isinstance(arg, (int, long)):
            raise TypeError("object_id argument must be of type " +
                            "ObjectId or an objectid representable integer")
        # render as zero-padded 24-digit hex for the ObjectId constructor
        hexvalue = hex(arg)[2:].rstrip('L').zfill(24)
        return self.ObjectId(hexvalue)
5565
5566 - def parse_reference(self, value, field_type):
5567 # here we have to check for ObjectID before base parse 5568 if isinstance(value, self.ObjectId): 5569 value = long(str(value), 16) 5570 return super(MongoDBAdapter, 5571 self).parse_reference(value, field_type)
5572
5573 - def parse_id(self, value, field_type):
5574 if isinstance(value, self.ObjectId): 5575 value = long(str(value), 16) 5576 return super(MongoDBAdapter, 5577 self).parse_id(value, field_type)
5578
    def represent(self, obj, fieldtype):
        """Convert a Python value to its MongoDB storage form.

        ObjectIds pass through untouched; everything else goes through
        the base NoSQLAdapter.represent and is then adjusted for types
        Mongo lacks (date, time, blob) or for reference fields, which
        must become ObjectIds.
        """
        # the base adapter does not support MongoDB ObjectId
        if isinstance(obj, self.ObjectId):
            value = obj
        else:
            value = NoSQLAdapter.represent(self, obj, fieldtype)
        # NOTE(review): this early return fires for any list/tuple whose
        # fieldtype is not "json" (including 'list:*' types), so the
        # 'list:' branch below only runs for non-list obj — confirm the
        # intended precedence of the condition
        if isinstance(obj, (list, tuple)) and \
                (not fieldtype == "json" or fieldtype.startswith('list:')):
            return value
        # reference types must be convert to ObjectID
        if fieldtype =='date':
            if value == None:
                return value
            # this piece of data can be stripped off based on the fieldtype
            t = datetime.time(0, 0, 0)
            # mongodb doesn't have a date object, so it must be stored
            # as datetime, string or integer
            return datetime.datetime.combine(value, t)
        elif fieldtype == 'time':
            if value == None:
                return value
            # this piece of data can be stripped off based on the fieldtype
            d = datetime.date(2000, 1, 1)
            # mongodb doesn't have a time object, so it must be stored
            # as datetime, string or integer
            return datetime.datetime.combine(d, value)
        elif fieldtype == "blob":
            if value== None:
                return value
            from bson import Binary
            if not isinstance(value, Binary):
                if not isinstance(value, basestring):
                    return Binary(str(value))
                return Binary(value)
            return value
        elif (isinstance(fieldtype, basestring) and
              fieldtype.startswith('list:')):
            # list:reference items become ObjectIds; other lists pass
            if fieldtype.startswith('list:reference'):
                newval = []
                for v in value:
                    newval.append(self.object_id(v))
                return newval
            return value
        elif ((isinstance(fieldtype, basestring) and
               fieldtype.startswith("reference")) or
              (isinstance(fieldtype, Table)) or fieldtype=="id"):
            value = self.object_id(value)
        return value
5627
5628 - def create_table(self, table, migrate=True, fake_migrate=False, 5629 polymodel=None, isCapped=False):
5630 if isCapped: 5631 raise RuntimeError("Not implemented")
5632
5633 - def count(self, query, distinct=None, snapshot=True):
5634 if distinct: 5635 raise RuntimeError("COUNT DISTINCT not supported") 5636 if not isinstance(query,Query): 5637 raise SyntaxError("Not Supported") 5638 tablename = self.get_table(query) 5639 return long(self.select(query,[self.db[tablename]._id], {}, 5640 count=True,snapshot=snapshot)['count'])
    # Maybe it would be faster if we just implemented the pymongo
    # .count() function which is probably quicker?
    # therefore call __select() connection[table].find(query).count()
    # Since this will probably reduce the return set?
    def expand(self, expression, field_type=None):
        """Recursively translate a DAL Query/Expression/Field/value
        into the pymongo query structure.

        NOTE(review): for id/reference queries this mutates the shared
        Field object (renames 'id' to '_id') and rewrites
        expression.second to ObjectId(s) in place; the result computed
        in that first branch is recomputed by the second 'if' below
        (it is deliberately not an elif).
        """
        if isinstance(expression, Query):
            # any query using 'id':=
            # set name as _id (as per pymongo/mongodb primary key)
            # convert second arg to an objectid field
            # (if its not already)
            # if second arg is 0 convert to objectid
            if isinstance(expression.first,Field) and \
                    ((expression.first.type == 'id') or \
                    ("reference" in expression.first.type)):
                if expression.first.type == 'id':
                    expression.first.name = '_id'
                # cast to Mongo ObjectId
                if isinstance(expression.second, (tuple, list, set)):
                    expression.second = [self.object_id(item) for
                                         item in expression.second]
                else:
                    expression.second = self.object_id(expression.second)
                result = expression.op(expression.first, expression.second)

        if isinstance(expression, Field):
            # 'id' maps to Mongo's reserved primary-key name
            if expression.type=='id':
                result = "_id"
            else:
                result = expression.name
        elif isinstance(expression, (Expression, Query)):
            # dispatch on the operator's arity
            if not expression.second is None:
                result = expression.op(expression.first, expression.second)
            elif not expression.first is None:
                result = expression.op(expression.first)
            elif not isinstance(expression.op, str):
                result = expression.op()
            else:
                result = expression.op
        elif field_type:
            # plain value with a known field type: serialize it
            result = self.represent(expression,field_type)
        elif isinstance(expression,(list,tuple)):
            result = ','.join(self.represent(item,field_type) for
                              item in expression)
        else:
            result = expression
        return result
5688
5689 - def drop(self, table, mode=''):
5690 ctable = self.connection[table._tablename] 5691 ctable.drop()
5692
5693 - def truncate(self, table, mode, safe=None):
5694 if safe == None: 5695 safe=self.safe 5696 ctable = self.connection[table._tablename] 5697 ctable.remove(None, safe=True)
5698
    def _select(self, query, fields, attributes):
        """Translate a DAL select into pymongo find() arguments.

        Returns (tablename, query_dict, fields_dict, sort_list,
        limit, skip) for select() to pass to pymongo.
        """
        if 'for_update' in attributes:
            logging.warn('mongodb does not support for_update')
        # warn about any other unimplemented select attribute
        for key in set(attributes.keys())-set(('limitby',
                                               'orderby','for_update')):
            if attributes[key]!=None:
                logging.warn('select attribute not implemented: %s' % key)

        new_fields=[]
        mongosort_list = []

        # try an orderby attribute
        orderby = attributes.get('orderby', False)
        limitby = attributes.get('limitby', False)
        # distinct = attributes.get('distinct', False)
        if orderby:
            if isinstance(orderby, (list, tuple)):
                orderby = xorify(orderby)

            # !!!! need to add 'random'
            # a leading '-' means descending (pymongo uses -1/1 flags)
            for f in self.expand(orderby).split(','):
                if f.startswith('-'):
                    mongosort_list.append((f[1:], -1))
                else:
                    mongosort_list.append((f, 1))
        # NOTE(review): DAL limitby is (start, stop); passing stop as
        # the pymongo 'limit' looks off by 'start' — confirm intent
        if limitby:
            limitby_skip, limitby_limit = limitby[0], int(limitby[1])
        else:
            limitby_skip = limitby_limit = 0

        mongofields_dict = self.SON()
        mongoqry_dict = {}
        # flatten SQLALL entries into their table's full field list
        for item in fields:
            if isinstance(item, SQLALL):
                new_fields += item._table
            else:
                new_fields.append(item)
        fields = new_fields
        if isinstance(query,Query):
            tablename = self.get_table(query)
        elif len(fields) != 0:
            tablename = fields[0].tablename
        else:
            raise SyntaxError("The table name could not be found in " +
                              "the query nor from the select statement.")
        mongoqry_dict = self.expand(query)
        fields = fields or self.db[tablename]
        for field in fields:
            mongofields_dict[field.name] = 1

        return tablename, mongoqry_dict, mongofields_dict, mongosort_list, \
            limitby_limit, limitby_skip
5751
    def select(self, query, fields, attributes, count=False,
               snapshot=False):
        """Run the translated query on pymongo and return web2py rows
        (or {'count': n} when count=True).

        NOTE(review): renames Field objects from '_id' to 'id' in
        place while building column names — this mutates the shared
        Field instances.
        """
        # TODO: support joins
        tablename, mongoqry_dict, mongofields_dict, mongosort_list, \
        limitby_limit, limitby_skip = self._select(query, fields, attributes)
        ctable = self.connection[tablename]

        if count:
            return {'count' : ctable.find(
                    mongoqry_dict, mongofields_dict,
                    skip=limitby_skip, limit=limitby_limit,
                    sort=mongosort_list, snapshot=snapshot).count()}
        else:
            # pymongo cursor object
            mongo_list_dicts = ctable.find(mongoqry_dict,
                                mongofields_dict, skip=limitby_skip,
                                limit=limitby_limit, sort=mongosort_list,
                                snapshot=snapshot)
            rows = []
            # populate row in proper order
            # Here we replace ._id with .id to follow the standard naming
            colnames = []
            newnames = []
            for field in fields:
                colname = str(field)
                colnames.append(colname)
                tablename, fieldname = colname.split(".")
                if fieldname == "_id":
                    # Mongodb reserved uuid key
                    field.name = "id"
                newnames.append(".".join((tablename, field.name)))

            for record in mongo_list_dicts:
                row=[]
                for colname in colnames:
                    tablename, fieldname = colname.split(".")
                    # switch to Mongo _id uuids for retrieving
                    # record id's
                    if fieldname == "id": fieldname = "_id"
                    # missing keys yield None instead of a KeyError
                    if fieldname in record:
                        value = record[fieldname]
                    else:
                        value = None
                    row.append(value)
                rows.append(row)

            processor = attributes.get('processor', self.parse)
            result = processor(rows, fields, newnames, False)
            return result
5801
5802 - def _insert(self, table, fields):
5803 values = dict() 5804 for k, v in fields: 5805 if not k.name in ["id", "safe"]: 5806 fieldname = k.name 5807 fieldtype = table[k.name].type 5808 values[fieldname] = self.represent(v, fieldtype) 5809 return values
    # Safe determines whether an asynchronous request is done or a
    # synchronous action is done
    # For safety, we use by default synchronous requests
5814 - def insert(self, table, fields, safe=None):
5815 if safe==None: 5816 safe = self.safe 5817 ctable = self.connection[table._tablename] 5818 values = self._insert(table, fields) 5819 ctable.insert(values, safe=safe) 5820 return long(str(values['_id']), 16)
    # this function returns a dict with the where clause and update fields
5823 - def _update(self, tablename, query, fields):
5824 if not isinstance(query, Query): 5825 raise SyntaxError("Not Supported") 5826 filter = None 5827 if query: 5828 filter = self.expand(query) 5829 # do not try to update id fields to avoid backend errors 5830 modify = {'$set': dict((k.name, self.represent(v, k.type)) for 5831 k, v in fields if (not k.name in ("_id", "id")))} 5832 return modify, filter
5833
    def update(self, tablename, query, fields, safe=None):
        """Update all documents matching *query*; return the affected
        row count (best effort).

        safe: wait for write acknowledgement; defaults to the
        adapter-level setting when None.
        """
        if safe == None:
            safe = self.safe
        # return amount of adjusted rows or zero, but no exceptions
        # @ related not finding the result
        if not isinstance(query, Query):
            raise RuntimeError("Not implemented")
        # pre-count matches so something sensible can be returned even
        # when the driver result carries no 'n' count
        amount = self.count(query, False)
        modify, filter = self._update(tablename, query, fields)
        try:
            result = self.connection[tablename].update(filter,
                       modify, multi=True, safe=safe)
            if safe:
                try:
                    # if result count is available fetch it
                    return result["n"]
                except (KeyError, AttributeError, TypeError):
                    return amount
            else:
                return amount
        except Exception, e:
            # TODO Reverse update query to verify that the query succeeded
            raise RuntimeError("uncaught exception when updating rows: %s" % e)
5857
5858 - def _delete(self, tablename, query):
5859 if not isinstance(query, Query): 5860 raise RuntimeError("query type %s is not supported" % \ 5861 type(query)) 5862 return self.expand(query)
5863
5864 - def delete(self, tablename, query, safe=None):
5865 if safe is None: 5866 safe = self.safe 5867 amount = 0 5868 amount = self.count(query, False) 5869 filter = self._delete(tablename, query) 5870 self.connection[tablename].remove(filter, safe=safe) 5871 return amount
5872
5873 - def bulk_insert(self, table, items):
5874 return [self.insert(table,item) for item in items]
5875 5876 ## OPERATORS
5877 - def INVERT(self, first):
5878 #print "in invert first=%s" % first 5879 return '-%s' % self.expand(first)
5880 5881 # TODO This will probably not work:(
5882 - def NOT(self, first):
5883 return {'$not': self.expand(first)}
5884
5885 - def AND(self,first,second):
5886 # pymongo expects: .find({'$and': [{'x':'1'}, {'y':'2'}]}) 5887 return {'$and': [self.expand(first),self.expand(second)]}
5888
5889 - def OR(self,first,second):
5890 # pymongo expects: .find({'$or': [{'name':'1'}, {'name':'2'}]}) 5891 return {'$or': [self.expand(first),self.expand(second)]}
5892
5893 - def BELONGS(self, first, second):
5894 if isinstance(second, str): 5895 return {self.expand(first) : {"$in" : [ second[:-1]]} } 5896 elif second==[] or second==() or second==set(): 5897 return {1:0} 5898 items = [self.expand(item, first.type) for item in second] 5899 return {self.expand(first) : {"$in" : items} }
5900
5901 - def EQ(self,first,second=None):
5902 result = {} 5903 result[self.expand(first)] = self.expand(second) 5904 return result
5905
5906 - def NE(self, first, second=None):
5907 result = {} 5908 result[self.expand(first)] = {'$ne': self.expand(second)} 5909 return result
5910
5911 - def LT(self,first,second=None):
5912 if second is None: 5913 raise RuntimeError("Cannot compare %s < None" % first) 5914 result = {} 5915 result[self.expand(first)] = {'$lt': self.expand(second)} 5916 return result
5917
5918 - def LE(self,first,second=None):
5919 if second is None: 5920 raise RuntimeError("Cannot compare %s <= None" % first) 5921 result = {} 5922 result[self.expand(first)] = {'$lte': self.expand(second)} 5923 return result
5924
5925 - def GT(self,first,second):
5926 result = {} 5927 result[self.expand(first)] = {'$gt': self.expand(second)} 5928 return result
5929
5930 - def GE(self,first,second=None):
5931 if second is None: 5932 raise RuntimeError("Cannot compare %s >= None" % first) 5933 result = {} 5934 result[self.expand(first)] = {'$gte': self.expand(second)} 5935 return result
5936
5937 - def ADD(self, first, second):
5938 raise NotImplementedError(self.error_messages["javascript_needed"]) 5939 return '%s + %s' % (self.expand(first), 5940 self.expand(second, first.type))
5941
5942 - def SUB(self, first, second):
5943 raise NotImplementedError(self.error_messages["javascript_needed"]) 5944 return '(%s - %s)' % (self.expand(first), 5945 self.expand(second, first.type))
5946
5947 - def MUL(self, first, second):
5948 raise NotImplementedError(self.error_messages["javascript_needed"]) 5949 return '(%s * %s)' % (self.expand(first), 5950 self.expand(second, first.type))
5951
5952 - def DIV(self, first, second):
5953 raise NotImplementedError(self.error_messages["javascript_needed"]) 5954 return '(%s / %s)' % (self.expand(first), 5955 self.expand(second, first.type))
5956
5957 - def MOD(self, first, second):
5958 raise NotImplementedError(self.error_messages["javascript_needed"]) 5959 return '(%s %% %s)' % (self.expand(first), 5960 self.expand(second, first.type))
5961
5962 - def AS(self, first, second):
5963 raise NotImplementedError(self.error_messages["javascript_needed"]) 5964 return '%s AS %s' % (self.expand(first), second)
5965 5966 # We could implement an option that simulates a full featured SQL 5967 # database. But I think the option should be set explicit or 5968 # implemented as another library.
5969 - def ON(self, first, second):
5970 raise NotImplementedError("This is not possible in NoSQL" + 5971 " but can be simulated with a wrapper.") 5972 return '%s ON %s' % (self.expand(first), self.expand(second))
    # BELOW ARE TWO IMPLEMENTATIONS OF THE SAME FUNCTIONS
    # WHICH ONE IS BEST?
5977 - def COMMA(self, first, second):
5978 return '%s, %s' % (self.expand(first), self.expand(second))
5979
    def LIKE(self, first, second):
        # NOTE(review): dead code — shadowed by the later LIKE
        # definition in this class, which wins at class-creation time.
        #escaping regex operators?
        return {self.expand(first): ('%s' % \
                self.expand(second, 'string').replace('%','/'))}
5984
5985 - def ILIKE(self, first, second):
5986 val = second if isinstance(second,self.ObjectId) else { 5987 '$regex': second.replace('%', ''), '$options': 'i'} 5988 return {self.expand(first): val}
5989
    def STARTSWITH(self, first, second):
        # NOTE(review): dead code — shadowed by the later STARTSWITH
        # definition in this class.
        #escaping regex operators?
        return {self.expand(first): ('/^%s/' % \
                self.expand(second, 'string'))}
5994
    def ENDSWITH(self, first, second):
        # NOTE(review): dead code — shadowed by the later ENDSWITH
        # definition in this class.  Also the '^' anchor here looks
        # misplaced (an ends-with match anchors with '$' at the end);
        # the later definition uses the correct '$' anchor.
        #escaping regex operators?
        return {self.expand(first): ('/%s^/' % \
                self.expand(second, 'string'))}
5999
    def CONTAINS(self, first, second, case_sensitive=False):
        # NOTE(review): dead code — shadowed by the later CONTAINS
        # definition in this class.
        # silently ignore, only case sensitive
        # There is a technical difference, but mongodb doesn't support
        # that, but the result will be the same
        val = second if isinstance(second,self.ObjectId) else \
            {'$regex':".*" + re.escape(self.expand(second, 'string')) + ".*"}
        return {self.expand(first) : val}
6007
6008 - def LIKE(self, first, second):
6009 import re 6010 return {self.expand(first): {'$regex': \ 6011 re.escape(self.expand(second, 6012 'string')).replace('%','.*')}}
    # TODO: verify full compatibility with the official SQL LIKE operator
6015 - def STARTSWITH(self, first, second):
6016 #TODO Solve almost the same problem as with endswith 6017 import re 6018 return {self.expand(first): {'$regex' : '^' + 6019 re.escape(self.expand(second, 6020 'string'))}}
    # TODO: verify full compatibility with the official SQL LIKE operator
6023 - def ENDSWITH(self, first, second):
6024 #escaping regex operators? 6025 #TODO if searched for a name like zsa_corbitt and the function 6026 # is endswith('a') then this is also returned. 6027 # Aldo it end with a t 6028 import re 6029 return {self.expand(first): {'$regex': \ 6030 re.escape(self.expand(second, 'string')) + '$'}}
    # TODO: verify full compatibility with the official Oracle CONTAINS operator
6033 - def CONTAINS(self, first, second, case_sensitive=False):
6034 # silently ignore, only case sensitive 6035 #There is a technical difference, but mongodb doesn't support 6036 # that, but the result will be the same 6037 #TODO contains operators need to be transformed to Regex 6038 return {self.expand(first) : {'$regex': \ 6039 ".*" + re.escape(self.expand(second, 'string')) + ".*"}}
6040
6041 6042 -class IMAPAdapter(NoSQLAdapter):
6043 drivers = ('imaplib',) 6044 6045 """ IMAP server adapter 6046 6047 This class is intended as an interface with 6048 email IMAP servers to perform simple queries in the 6049 web2py DAL query syntax, so email read, search and 6050 other related IMAP mail services (as those implemented 6051 by brands like Google(r), and Yahoo!(r) 6052 can be managed from web2py applications. 6053 6054 The code uses examples by Yuji Tomita on this post: 6055 http://yuji.wordpress.com/2011/06/22/python-imaplib-imap-example-with-gmail/#comment-1137 6056 and is based in docs for Python imaplib, python email 6057 and email IETF's (i.e. RFC2060 and RFC3501) 6058 6059 This adapter was tested with a small set of operations with Gmail(r). Other 6060 services requests could raise command syntax and response data issues. 6061 6062 It creates its table and field names "statically", 6063 meaning that the developer should leave the table and field 6064 definitions to the DAL instance by calling the adapter's 6065 .define_tables() method. The tables are defined with the 6066 IMAP server mailbox list information. 
6067 6068 .define_tables() returns a dictionary mapping dal tablenames 6069 to the server mailbox names with the following structure: 6070 6071 {<tablename>: str <server mailbox name>} 6072 6073 Here is a list of supported fields: 6074 6075 Field Type Description 6076 ################################################################ 6077 uid string 6078 answered boolean Flag 6079 created date 6080 content list:string A list of dict text or html parts 6081 to string 6082 cc string 6083 bcc string 6084 size integer the amount of octets of the message* 6085 deleted boolean Flag 6086 draft boolean Flag 6087 flagged boolean Flag 6088 sender string 6089 recent boolean Flag 6090 seen boolean Flag 6091 subject string 6092 mime string The mime header declaration 6093 email string The complete RFC822 message** 6094 attachments <type list> Each non text part as dict 6095 encoding string The main detected encoding 6096 6097 *At the application side it is measured as the length of the RFC822 6098 message string 6099 6100 WARNING: As row id's are mapped to email sequence numbers, 6101 make sure your imap client web2py app does not delete messages 6102 during select or update actions, to prevent 6103 updating or deleting different messages. 6104 Sequence numbers change whenever the mailbox is updated. 6105 To avoid this sequence numbers issues, it is recommended the use 6106 of uid fields in query references (although the update and delete 6107 in separate actions rule still applies). 
6108 6109 # This is the code recommended to start imap support 6110 # at the app's model: 6111 6112 imapdb = DAL("imap://user:password@server:port", pool_size=1) # port 993 for ssl 6113 imapdb.define_tables() 6114 6115 Here is an (incomplete) list of possible imap commands: 6116 6117 # Count today's unseen messages 6118 # smaller than 6000 octets from the 6119 # inbox mailbox 6120 6121 q = imapdb.INBOX.seen == False 6122 q &= imapdb.INBOX.created == datetime.date.today() 6123 q &= imapdb.INBOX.size < 6000 6124 unread = imapdb(q).count() 6125 6126 # Fetch last query messages 6127 rows = imapdb(q).select() 6128 6129 # it is also possible to filter query select results with limitby and 6130 # sequences of mailbox fields 6131 6132 set.select(<fields sequence>, limitby=(<int>, <int>)) 6133 6134 # Mark last query messages as seen 6135 messages = [row.uid for row in rows] 6136 seen = imapdb(imapdb.INBOX.uid.belongs(messages)).update(seen=True) 6137 6138 # Delete messages in the imap database that have mails from mr. 
Gumby 6139 6140 deleted = 0 6141 for mailbox in imapdb.tables 6142 deleted += imapdb(imapdb[mailbox].sender.contains("gumby")).delete() 6143 6144 # It is possible also to mark messages for deletion instead of ereasing them 6145 # directly with set.update(deleted=True) 6146 6147 6148 # This object give access 6149 # to the adapter auto mailbox 6150 # mapped names (which native 6151 # mailbox has what table name) 6152 6153 imapdb.mailboxes <dict> # tablename, server native name pairs 6154 6155 # To retrieve a table native mailbox name use: 6156 imapdb.<table>.mailbox 6157 6158 ### New features v2.4.1: 6159 6160 # Declare mailboxes statically with tablename, name pairs 6161 # This avoids the extra server names retrieval 6162 6163 imapdb.define_tables({"inbox": "INBOX"}) 6164 6165 # Selects without content/attachments/email columns will only 6166 # fetch header and flags 6167 6168 imapdb(q).select(imapdb.INBOX.sender, imapdb.INBOX.subject) 6169 """ 6170 6171 types = { 6172 'string': str, 6173 'text': str, 6174 'date': datetime.date, 6175 'datetime': datetime.datetime, 6176 'id': long, 6177 'boolean': bool, 6178 'integer': int, 6179 'bigint': long, 6180 'blob': str, 6181 'list:string': str, 6182 } 6183 6184 dbengine = 'imap' 6185 6186 REGEX_URI = re.compile('^(?P<user>[^:]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@]+)(\:(?P<port>[0-9]+))?$') 6187
    def __init__(self,
                 db,
                 uri,
                 pool_size=0,
                 folder=None,
                 db_codec ='UTF-8',
                 credential_decoder=IDENTITY,
                 driver_args={},
                 adapter_args={},
                 do_connect=True,
                 after_connection=None):
        """IMAP adapter constructor.

        db uri: user@example.com:password@imap.server.com:123
        Port 993 switches the connector to IMAP4_SSL.
        """
        # TODO: max size adapter argument for preventing large mail transfers

        self.db = db
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        self.pool_size=pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.credential_decoder = credential_decoder
        self.driver_args = driver_args
        self.adapter_args = adapter_args
        self.mailbox_size = None
        self.static_names = None
        self.charset = sys.getfilesystemencoding()
        # imap class
        self.imap4 = None
        # strip the "imap://" scheme before parsing credentials
        uri = uri.split("://")[1]

        """ MESSAGE is an identifier for sequence number"""

        # IMAP system flags addressable as boolean table fields
        self.flags = {'deleted': '\\Deleted', 'draft': '\\Draft',
                      'flagged': '\\Flagged', 'recent': '\\Recent',
                      'seen': '\\Seen', 'answered': '\\Answered'}
        # map DAL field names to IMAP SEARCH keys (None: not searchable)
        self.search_fields = {
            'id': 'MESSAGE', 'created': 'DATE',
            'uid': 'UID', 'sender': 'FROM',
            'to': 'TO', 'cc': 'CC',
            'bcc': 'BCC', 'content': 'TEXT',
            'size': 'SIZE', 'deleted': '\\Deleted',
            'draft': '\\Draft', 'flagged': '\\Flagged',
            'recent': '\\Recent', 'seen': '\\Seen',
            'subject': 'SUBJECT', 'answered': '\\Answered',
            'mime': None, 'email': None,
            'attachments': None
            }

        db['_lastsql'] = ''

        m = self.REGEX_URI.match(uri)
        user = m.group('user')
        password = m.group('password')
        host = m.group('host')
        port = int(m.group('port'))
        over_ssl = False
        if port==993:
            over_ssl = True

        driver_args.update(host=host,port=port, password=password, user=user)
        def connector(driver_args=driver_args):
            # successful authentication is always assumed here
            # TODO: support direct connection and login tests
            if over_ssl:
                self.imap4 = self.driver.IMAP4_SSL
            else:
                self.imap4 = self.driver.IMAP4
            connection = self.imap4(driver_args["host"], driver_args["port"])
            data = connection.login(driver_args["user"], driver_args["password"])

            # static mailbox list
            connection.mailbox_names = None

            # dummy cursor function
            connection.cursor = lambda : True

            return connection

        self.db.define_tables = self.define_tables
        self.connector = connector
        if do_connect: self.reconnect()
6271
    def reconnect(self, f=None, cursor=True):
        """
        IMAP4 Pool connection method

        imap connection lacks of self cursor command.
        A custom command should be provided as a replacement
        for connection pooling to prevent uncaught remote session
        closing

        """
        # already connected: nothing to do
        if getattr(self,'connection',None) != None:
            return
        if f is None:
            f = self.connector

        if not self.pool_size:
            # pooling disabled: connect directly
            self.connection = f()
            self.cursor = cursor and self.connection.cursor()
        else:
            POOLS = ConnectionPool.POOLS
            uri = self.uri
            while True:
                GLOBAL_LOCKER.acquire()
                if not uri in POOLS:
                    POOLS[uri] = []
                if POOLS[uri]:
                    # reuse a pooled connection, replacing it when the
                    # liveness check (LIST) fails
                    self.connection = POOLS[uri].pop()
                    GLOBAL_LOCKER.release()
                    self.cursor = cursor and self.connection.cursor()
                    if self.cursor and self.check_active_connection:
                        try:
                            # check if connection is alive or close it
                            result, data = self.connection.list()
                        except:
                            # Possible connection reset error
                            # TODO: read exception class
                            self.connection = f()
                    break
                else:
                    # pool empty: create a fresh connection
                    GLOBAL_LOCKER.release()
                    self.connection = f()
                    self.cursor = cursor and self.connection.cursor()
                    break
        self.after_connection_hook()
6316
6317 - def get_last_message(self, tablename):
6318 last_message = None 6319 # request mailbox list to the server if needed. 6320 if not isinstance(self.connection.mailbox_names, dict): 6321 self.get_mailboxes() 6322 try: 6323 result = self.connection.select( 6324 self.connection.mailbox_names[tablename]) 6325 last_message = int(result[1][0]) 6326 # Last message must be a positive integer 6327 if last_message == 0: 6328 last_message = 1 6329 except (IndexError, ValueError, TypeError, KeyError): 6330 e = sys.exc_info()[1] 6331 LOGGER.debug("Error retrieving the last mailbox" + 6332 " sequence number. %s" % str(e)) 6333 return last_message
6334
6335 - def get_uid_bounds(self, tablename):
6336 if not isinstance(self.connection.mailbox_names, dict): 6337 self.get_mailboxes() 6338 # fetch first and last messages 6339 # return (first, last) messages uid's 6340 last_message = self.get_last_message(tablename) 6341 result, data = self.connection.uid("search", None, "(ALL)") 6342 uid_list = data[0].strip().split() 6343 if len(uid_list) <= 0: 6344 return None 6345 else: 6346 return (uid_list[0], uid_list[-1])
6347
6348 - def convert_date(self, date, add=None, imf=False):
6349 if add is None: 6350 add = datetime.timedelta() 6351 """ Convert a date object to a string 6352 with d-Mon-Y style for IMAP or the inverse 6353 case 6354 6355 add <timedelta> adds to the date object 6356 """ 6357 months = [None, "JAN","FEB","MAR","APR","MAY","JUN", 6358 "JUL", "AUG","SEP","OCT","NOV","DEC"] 6359 if isinstance(date, basestring): 6360 # Prevent unexpected date response format 6361 try: 6362 if "," in date: 6363 dayname, datestring = date.split(",") 6364 else: 6365 dayname, datestring = None, date 6366 date_list = datestring.strip().split() 6367 year = int(date_list[2]) 6368 month = months.index(date_list[1].upper()) 6369 day = int(date_list[0]) 6370 hms = map(int, date_list[3].split(":")) 6371 return datetime.datetime(year, month, day, 6372 hms[0], hms[1], hms[2]) + add 6373 except (ValueError, AttributeError, IndexError), e: 6374 LOGGER.error("Could not parse date text: %s. %s" % 6375 (date, e)) 6376 return None 6377 elif isinstance(date, (datetime.date, datetime.datetime)): 6378 if imf: date_format = "%a, %d %b %Y %H:%M:%S %z" 6379 else: date_format = "%d-%b-%Y" 6380 return (date + add).strftime(date_format) 6381 else: 6382 return None
6383 6384 @staticmethod
6385 - def header_represent(f, r):
6386 from email.header import decode_header 6387 text, encoding = decode_header(f)[0] 6388 if encoding: 6389 text = text.decode(encoding).encode('utf-8') 6390 return text
6391
6392 - def encode_text(self, text, charset, errors="replace"):
6393 """ convert text for mail to unicode""" 6394 if text is None: 6395 text = "" 6396 else: 6397 if isinstance(text, str): 6398 if charset is None: 6399 text = unicode(text, "utf-8", errors) 6400 else: 6401 text = unicode(text, charset, errors) 6402 else: 6403 raise Exception("Unsupported mail text type %s" % type(text)) 6404 return text.encode("utf-8")
6405
6406 - def get_charset(self, message):
6407 charset = message.get_content_charset() 6408 return charset
6409
6410 - def get_mailboxes(self):
6411 """ Query the mail database for mailbox names """ 6412 if self.static_names: 6413 # statically defined mailbox names 6414 self.connection.mailbox_names = self.static_names 6415 return self.static_names.keys() 6416 6417 mailboxes_list = self.connection.list() 6418 self.connection.mailbox_names = dict() 6419 mailboxes = list() 6420 x = 0 6421 for item in mailboxes_list[1]: 6422 x = x + 1 6423 item = item.strip() 6424 if not "NOSELECT" in item.upper(): 6425 sub_items = item.split("\"") 6426 sub_items = [sub_item for sub_item in sub_items \ 6427 if len(sub_item.strip()) > 0] 6428 # mailbox = sub_items[len(sub_items) -1] 6429 mailbox = sub_items[-1].strip() 6430 # remove unwanted characters and store original names 6431 # Don't allow leading non alphabetic characters 6432 mailbox_name = re.sub('^[_0-9]*', '', re.sub('[^_\w]','',re.sub('[/ ]','_',mailbox))) 6433 mailboxes.append(mailbox_name) 6434 self.connection.mailbox_names[mailbox_name] = mailbox 6435 6436 return mailboxes
6437
6438 - def get_query_mailbox(self, query):
6439 nofield = True 6440 tablename = None 6441 attr = query 6442 while nofield: 6443 if hasattr(attr, "first"): 6444 attr = attr.first 6445 if isinstance(attr, Field): 6446 return attr.tablename 6447 elif isinstance(attr, Query): 6448 pass 6449 else: 6450 return None 6451 else: 6452 return None 6453 return tablename
6454
6455 - def is_flag(self, flag):
6456 if self.search_fields.get(flag, None) in self.flags.values(): 6457 return True 6458 else: 6459 return False
6460
    def define_tables(self, mailbox_names=None):
        """
        Auto create common IMAP fields

        This function creates fields definitions "statically"
        meaning that custom fields as in other adapters should
        not be supported and definitions handled on a service/mode
        basis (local syntax for Gmail(r), Ymail(r)

        Returns a dictionary with tablename, server native mailbox name
        pairs.
        """
        if mailbox_names:
            # optional statically declared mailboxes
            self.static_names = mailbox_names
        else:
            self.static_names = None
        # fetch mailbox names from the server when not already known
        if not isinstance(self.connection.mailbox_names, dict):
            self.get_mailboxes()

        names = self.connection.mailbox_names.keys()

        # one table per mailbox; most fields are read-only because they
        # mirror server state, only flag fields are writable
        for name in names:
            self.db.define_table("%s" % name,
                            Field("uid", writable=False),
                            Field("created", "datetime", writable=False),
                            Field("content", "text", writable=False),
                            Field("to", writable=False),
                            Field("cc", writable=False),
                            Field("bcc", writable=False),
                            Field("sender", writable=False),
                            Field("size", "integer", writable=False),
                            Field("subject", writable=False),
                            Field("mime", writable=False),
                            Field("email", "text", writable=False, readable=False),
                            Field("attachments", "text", writable=False, readable=False),
                            Field("encoding", writable=False),
                            Field("answered", "boolean"),
                            Field("deleted", "boolean"),
                            Field("draft", "boolean"),
                            Field("flagged", "boolean"),
                            Field("recent", "boolean", writable=False),
                            Field("seen", "boolean")
                            )

            # Set a special _mailbox attribute for storing
            # native mailbox names
            self.db[name].mailbox = \
                self.connection.mailbox_names[name]

            # decode quoted printable
            self.db[name].to.represent = self.db[name].cc.represent = \
                self.db[name].bcc.represent = self.db[name].sender.represent = \
                self.db[name].subject.represent = self.header_represent

        # Set the db instance mailbox collections
        self.db.mailboxes = self.connection.mailbox_names
        return self.db.mailboxes
6519
6520 - def create_table(self, *args, **kwargs):
6521 # not implemented 6522 # but required by DAL 6523 pass
6524
6525 - def _select(self, query, fields, attributes):
6526 if use_common_filters(query): 6527 query = self.common_filter(query, [self.get_query_mailbox(query),]) 6528 return str(query)
6529
    def select(self, query, fields, attributes):
        """ Search and Fetch records and return web2py rows

        Translates the query into an IMAP SEARCH, fetches each matching
        message (UID FETCH), maps the message headers/flags/body onto the
        adapter's static field set and returns the result through the
        configured row processor.
        """
        # move this statement elsewhere (upper-level)
        if use_common_filters(query):
            query = self.common_filter(query, [self.get_query_mailbox(query),])

        import email
        # get records from imap server with search + fetch
        # convert results to a dictionary
        tablename = None
        fetch_results = list()

        if isinstance(query, Query):
            tablename = self.get_table(query)
            mailbox = self.connection.mailbox_names.get(tablename, None)
            if mailbox is None:
                raise ValueError("Mailbox name not found: %s" % mailbox)
            else:
                # select with readonly
                result, selected = self.connection.select(mailbox, True)
                if result != "OK":
                    raise Exception("IMAP error: %s" % selected)
                self.mailbox_size = int(selected[0])
                search_query = "(%s)" % str(query).strip()
                search_result = self.connection.uid("search", None, search_query)
                # Normal IMAP response OK is assumed (change this)
                if search_result[0] == "OK":
                    # For "light" remote server responses just get the first
                    # ten records (change for non-experimental implementation)
                    # However, light responses are not guaranteed with this
                    # approach, just fewer messages.
                    limitby = attributes.get('limitby', None)
                    messages_set = search_result[1][0].split()
                    # descending order
                    messages_set.reverse()
                    if limitby is not None:
                        # TODO: orderby, asc/desc, limitby from complete message set
                        messages_set = messages_set[int(limitby[0]):int(limitby[1])]

                    # keep the requests small for header/flags: only fetch
                    # the full RFC822 body when a body-derived column is asked
                    if any([(field.name in ["content", "size",
                                            "attachments", "email"]) for
                            field in fields]):
                        imap_fields = "(RFC822 FLAGS)"
                    else:
                        imap_fields = "(RFC822.HEADER FLAGS)"

                    if len(messages_set) > 0:
                        # create fetch results object list
                        # fetch each remote message and store it in memmory
                        # (change to multi-fetch command syntax for faster
                        # transactions)
                        for uid in messages_set:
                            # fetch the RFC822 message body
                            typ, data = self.connection.uid("fetch", uid, imap_fields)
                            if typ == "OK":
                                fr = {"message": int(data[0][0].split()[0]),
                                      "uid": long(uid),
                                      "email": email.message_from_string(data[0][1]),
                                      "raw_message": data[0][1]}
                                fr["multipart"] = fr["email"].is_multipart()
                                # fetch flags for the message
                                fr["flags"] = self.driver.ParseFlags(data[1])
                                fetch_results.append(fr)
                            else:
                                # error retrieving the message body
                                raise Exception("IMAP error retrieving the body: %s" % data)
                else:
                    raise Exception("IMAP search error: %s" % search_result[1])
        elif isinstance(query, (Expression, basestring)):
            raise NotImplementedError()
        else:
            raise TypeError("Unexpected query type")

        imapqry_dict = {}
        imapfields_dict = {}

        # SQLALL or an empty field list means "all static IMAP fields"
        if len(fields) == 1 and isinstance(fields[0], SQLALL):
            allfields = True
        elif len(fields) == 0:
            allfields = True
        else:
            allfields = False
        if allfields:
            colnames = ["%s.%s" % (tablename, field) for field in self.search_fields.keys()]
        else:
            colnames = ["%s.%s" % (tablename, field.name) for field in fields]

        for k in colnames:
            imapfields_dict[k] = k

        imapqry_list = list()
        imapqry_array = list()
        for fr in fetch_results:
            attachments = []
            content = []
            size = 0
            n = int(fr["message"])
            item_dict = dict()
            message = fr["email"]
            uid = fr["uid"]
            charset = self.get_charset(message)
            flags = fr["flags"]
            raw_message = fr["raw_message"]
            # Return messages data mapping static fields
            # and fetched results. Mapping should be made
            # outside the select function (with auxiliary
            # instance methods)

            # pending: search flags states trough the email message
            # instances for correct output

            # preserve subject encoding (ASCII/quoted printable)

            if "%s.id" % tablename in colnames:
                item_dict["%s.id" % tablename] = n
            if "%s.created" % tablename in colnames:
                item_dict["%s.created" % tablename] = self.convert_date(message["Date"])
            if "%s.uid" % tablename in colnames:
                item_dict["%s.uid" % tablename] = uid
            if "%s.sender" % tablename in colnames:
                # If there is no encoding found in the message header
                # force utf-8 replacing characters (change this to
                # module's defaults). Applies to .sender, .to, .cc
                # and .bcc fields
                item_dict["%s.sender" % tablename] = message["From"]
            if "%s.to" % tablename in colnames:
                item_dict["%s.to" % tablename] = message["To"]
            if "%s.cc" % tablename in colnames:
                if "Cc" in message.keys():
                    item_dict["%s.cc" % tablename] = message["Cc"]
                else:
                    item_dict["%s.cc" % tablename] = ""
            if "%s.bcc" % tablename in colnames:
                if "Bcc" in message.keys():
                    item_dict["%s.bcc" % tablename] = message["Bcc"]
                else:
                    item_dict["%s.bcc" % tablename] = ""
            # boolean columns are derived from the standard IMAP flags
            if "%s.deleted" % tablename in colnames:
                item_dict["%s.deleted" % tablename] = "\\Deleted" in flags
            if "%s.draft" % tablename in colnames:
                item_dict["%s.draft" % tablename] = "\\Draft" in flags
            if "%s.flagged" % tablename in colnames:
                item_dict["%s.flagged" % tablename] = "\\Flagged" in flags
            if "%s.recent" % tablename in colnames:
                item_dict["%s.recent" % tablename] = "\\Recent" in flags
            if "%s.seen" % tablename in colnames:
                item_dict["%s.seen" % tablename] = "\\Seen" in flags
            if "%s.subject" % tablename in colnames:
                item_dict["%s.subject" % tablename] = message["Subject"]
            if "%s.answered" % tablename in colnames:
                item_dict["%s.answered" % tablename] = "\\Answered" in flags
            if "%s.mime" % tablename in colnames:
                item_dict["%s.mime" % tablename] = message.get_content_type()
            if "%s.encoding" % tablename in colnames:
                item_dict["%s.encoding" % tablename] = charset

            # Here goes the whole RFC822 body as an email instance
            # for controller side custom processing
            # The message is stored as a raw string
            # >> email.message_from_string(raw string)
            # returns a Message object for enhanced object processing
            if "%s.email" % tablename in colnames:
                # WARNING: no encoding performed (raw message)
                item_dict["%s.email" % tablename] = raw_message

            # Size measure as suggested in a Velocity Reviews post
            # by Tim Williams: "how to get size of email attachment"
            # Note: len() and server RFC822.SIZE reports doesn't match
            # To retrieve the server size for representation would add a new
            # fetch transaction to the process
            for part in message.walk():
                maintype = part.get_content_maintype()
                if ("%s.attachments" % tablename in colnames) or \
                   ("%s.content" % tablename in colnames):
                    payload = part.get_payload(decode=True)
                    if payload:
                        filename = part.get_filename()
                        values = {"mime": part.get_content_type()}
                        if ((filename or not "text" in maintype) and
                            ("%s.attachments" % tablename in colnames)):
                            values.update({"payload": payload,
                                           "filename": filename,
                                           "encoding": part.get_content_charset(),
                                           "disposition": part["Content-Disposition"]})
                            attachments.append(values)
                        elif (("text" in maintype) and
                              ("%s.content" % tablename in colnames)):
                            values.update({"text": self.encode_text(payload,
                                self.get_charset(part))})
                            content.append(values)

                if "%s.size" % tablename in colnames:
                    if part is not None:
                        size += len(str(part))
            # NOTE(review): content/attachments/size are stored even when
            # not requested in colnames; harmless since only colnames are
            # read back below
            item_dict["%s.content" % tablename] = content
            item_dict["%s.attachments" % tablename] = attachments
            item_dict["%s.size" % tablename] = size
            imapqry_list.append(item_dict)

        # extra object mapping for the sake of rows object
        # creation (sends an array or lists)
        for item_dict in imapqry_list:
            imapqry_array_item = list()
            for fieldname in colnames:
                imapqry_array_item.append(item_dict[fieldname])
            imapqry_array.append(imapqry_array_item)

        # parse result and return a rows object
        colnames = colnames
        processor = attributes.get('processor', self.parse)
        return processor(imapqry_array, fields, colnames)
6742
    def _insert(self, table, fields):
        """
        Build the argument tuple for an IMAP APPEND from DAL field values.

        :param table: target DAL table (its .mailbox attribute holds the
            native mailbox name)
        :param fields: sequence of (Field, value) pairs
        :returns: (mailbox, flags, struct_time, message) suitable for
            passing to connection.append(*values)
        :raises NotImplementedError: when no field values are given
        """
        def add_payload(message, obj):
            # attach one MIME part described by dict *obj*
            # (keys: encoding, mime, text/payload, filename)
            payload = Message()
            encoding = obj.get("encoding", "utf-8")
            if encoding and (encoding.upper() in
                             ("BASE64", "7BIT", "8BIT", "BINARY")):
                payload.add_header("Content-Transfer-Encoding", encoding)
            else:
                payload.set_charset(encoding)
            mime = obj.get("mime", None)
            if mime:
                payload.set_type(mime)
            if "text" in obj:
                payload.set_payload(obj["text"])
            elif "payload" in obj:
                payload.set_payload(obj["payload"])
            if "filename" in obj and obj["filename"]:
                payload.add_header("Content-Disposition",
                    "attachment", filename=obj["filename"])
            message.attach(payload)

        mailbox = table.mailbox
        d = dict(((k.name, v) for k, v in fields))
        date_time = d.get("created") or datetime.datetime.now()
        struct_time = date_time.timetuple()
        if len(d) > 0:
            message = d.get("email", None)
            attachments = d.get("attachments", [])
            content = d.get("content", [])
            # collect standard IMAP flags from the boolean columns
            flags = " ".join(["\\%s" % flag.capitalize() for flag in
                              ("answered", "deleted", "draft", "flagged",
                               "recent", "seen") if d.get(flag, False)])
            if not message:
                # no raw message supplied: assemble one from the
                # individual header/content fields
                from email.message import Message
                mime = d.get("mime", None)
                charset = d.get("encoding", None)
                message = Message()
                message["from"] = d.get("sender", "")
                message["subject"] = d.get("subject", "")
                message["date"] = self.convert_date(date_time, imf=True)

                if mime:
                    message.set_type(mime)
                if charset:
                    message.set_charset(charset)
                for item in ("to", "cc", "bcc"):
                    value = d.get(item, "")
                    if isinstance(value, basestring):
                        message[item] = value
                    else:
                        # address lists are joined with ";"
                        message[item] = ";".join([i for i in
                                                  value])
                if (not message.is_multipart() and
                   (not message.get_content_type().startswith(
                        "multipart"))):
                    # single-part message: only the first content item
                    # can be used as the payload
                    if isinstance(content, basestring):
                        message.set_payload(content)
                    elif len(content) > 0:
                        message.set_payload(content[0]["text"])
                else:
                    # multipart: attach every content and attachment part
                    [add_payload(message, c) for c in content]
                    [add_payload(message, a) for a in attachments]
                message = message.as_string()
            return (mailbox, flags, struct_time, message)
        else:
            raise NotImplementedError("IMAP empty insert is not implemented")
6810 - def insert(self, table, fields):
6811 values = self._insert(table, fields) 6812 result, data = self.connection.append(*values) 6813 if result == "OK": 6814 uid = int(re.findall("\d+", str(data))[-1]) 6815 return self.db(table.uid==uid).select(table.id).first().id 6816 else: 6817 raise Exception("IMAP message append failed: %s" % data)
6818
6819 - def _update(self, tablename, query, fields, commit=False):
6820 # TODO: the adapter should implement an .expand method 6821 commands = list() 6822 if use_common_filters(query): 6823 query = self.common_filter(query, [tablename,]) 6824 mark = [] 6825 unmark = [] 6826 if query: 6827 for item in fields: 6828 field = item[0] 6829 name = field.name 6830 value = item[1] 6831 if self.is_flag(name): 6832 flag = self.search_fields[name] 6833 if (value is not None) and (flag != "\\Recent"): 6834 if value: 6835 mark.append(flag) 6836 else: 6837 unmark.append(flag) 6838 result, data = self.connection.select( 6839 self.connection.mailbox_names[tablename]) 6840 string_query = "(%s)" % query 6841 result, data = self.connection.search(None, string_query) 6842 store_list = [item.strip() for item in data[0].split() 6843 if item.strip().isdigit()] 6844 # build commands for marked flags 6845 for number in store_list: 6846 result = None 6847 if len(mark) > 0: 6848 commands.append((number, "+FLAGS", "(%s)" % " ".join(mark))) 6849 if len(unmark) > 0: 6850 commands.append((number, "-FLAGS", "(%s)" % " ".join(unmark))) 6851 return commands
6852
6853 - def update(self, tablename, query, fields):
6854 rowcount = 0 6855 commands = self._update(tablename, query, fields) 6856 for command in commands: 6857 result, data = self.connection.store(*command) 6858 if result == "OK": 6859 rowcount += 1 6860 else: 6861 raise Exception("IMAP storing error: %s" % data) 6862 return rowcount
6863
6864 - def _count(self, query, distinct=None):
6865 raise NotImplementedError()
6866
6867 - def count(self,query,distinct=None):
6868 counter = 0 6869 tablename = self.get_query_mailbox(query) 6870 if query and tablename is not None: 6871 if use_common_filters(query): 6872 query = self.common_filter(query, [tablename,]) 6873 result, data = self.connection.select(self.connection.mailbox_names[tablename]) 6874 string_query = "(%s)" % query 6875 result, data = self.connection.search(None, string_query) 6876 store_list = [item.strip() for item in data[0].split() if item.strip().isdigit()] 6877 counter = len(store_list) 6878 return counter
6879
6880 - def delete(self, tablename, query):
6881 counter = 0 6882 if query: 6883 if use_common_filters(query): 6884 query = self.common_filter(query, [tablename,]) 6885 result, data = self.connection.select(self.connection.mailbox_names[tablename]) 6886 string_query = "(%s)" % query 6887 result, data = self.connection.search(None, string_query) 6888 store_list = [item.strip() for item in data[0].split() if item.strip().isdigit()] 6889 for number in store_list: 6890 result, data = self.connection.store(number, "+FLAGS", "(\\Deleted)") 6891 if result == "OK": 6892 counter += 1 6893 else: 6894 raise Exception("IMAP store error: %s" % data) 6895 if counter > 0: 6896 result, data = self.connection.expunge() 6897 return counter
6898
6899 - def BELONGS(self, first, second):
6900 result = None 6901 name = self.search_fields[first.name] 6902 if name == "MESSAGE": 6903 values = [str(val) for val in second if str(val).isdigit()] 6904 result = "%s" % ",".join(values).strip() 6905 6906 elif name == "UID": 6907 values = [str(val) for val in second if str(val).isdigit()] 6908 result = "UID %s" % ",".join(values).strip() 6909 6910 else: 6911 raise Exception("Operation not supported") 6912 # result = "(%s %s)" % (self.expand(first), self.expand(second)) 6913 return result
6914
6915 - def CONTAINS(self, first, second, case_sensitive=False):
6916 # silently ignore, only case sensitive 6917 result = None 6918 name = self.search_fields[first.name] 6919 6920 if name in ("FROM", "TO", "SUBJECT", "TEXT"): 6921 result = "%s \"%s\"" % (name, self.expand(second)) 6922 else: 6923 if first.name in ("cc", "bcc"): 6924 result = "%s \"%s\"" % (first.name.upper(), self.expand(second)) 6925 elif first.name == "mime": 6926 result = "HEADER Content-Type \"%s\"" % self.expand(second) 6927 else: 6928 raise Exception("Operation not supported") 6929 return result
6930
6931 - def GT(self, first, second):
6932 result = None 6933 name = self.search_fields[first.name] 6934 if name == "MESSAGE": 6935 last_message = self.get_last_message(first.tablename) 6936 result = "%d:%d" % (int(self.expand(second)) + 1, last_message) 6937 elif name == "UID": 6938 # GT and LT may not return 6939 # expected sets depending on 6940 # the uid format implemented 6941 try: 6942 pedestal, threshold = self.get_uid_bounds(first.tablename) 6943 except TypeError: 6944 e = sys.exc_info()[1] 6945 LOGGER.debug("Error requesting uid bounds: %s", str(e)) 6946 return "" 6947 try: 6948 lower_limit = int(self.expand(second)) + 1 6949 except (ValueError, TypeError): 6950 e = sys.exc_info()[1] 6951 raise Exception("Operation not supported (non integer UID)") 6952 result = "UID %s:%s" % (lower_limit, threshold) 6953 elif name == "DATE": 6954 result = "SINCE %s" % self.convert_date(second, add=datetime.timedelta(1)) 6955 elif name == "SIZE": 6956 result = "LARGER %s" % self.expand(second) 6957 else: 6958 raise Exception("Operation not supported") 6959 return result
6960
6961 - def GE(self, first, second):
6962 result = None 6963 name = self.search_fields[first.name] 6964 if name == "MESSAGE": 6965 last_message = self.get_last_message(first.tablename) 6966 result = "%s:%s" % (self.expand(second), last_message) 6967 elif name == "UID": 6968 # GT and LT may not return 6969 # expected sets depending on 6970 # the uid format implemented 6971 try: 6972 pedestal, threshold = self.get_uid_bounds(first.tablename) 6973 except TypeError: 6974 e = sys.exc_info()[1] 6975 LOGGER.debug("Error requesting uid bounds: %s", str(e)) 6976 return "" 6977 lower_limit = self.expand(second) 6978 result = "UID %s:%s" % (lower_limit, threshold) 6979 elif name == "DATE": 6980 result = "SINCE %s" % self.convert_date(second) 6981 else: 6982 raise Exception("Operation not supported") 6983 return result
6984
6985 - def LT(self, first, second):
6986 result = None 6987 name = self.search_fields[first.name] 6988 if name == "MESSAGE": 6989 result = "%s:%s" % (1, int(self.expand(second)) - 1) 6990 elif name == "UID": 6991 try: 6992 pedestal, threshold = self.get_uid_bounds(first.tablename) 6993 except TypeError: 6994 e = sys.exc_info()[1] 6995 LOGGER.debug("Error requesting uid bounds: %s", str(e)) 6996 return "" 6997 try: 6998 upper_limit = int(self.expand(second)) - 1 6999 except (ValueError, TypeError): 7000 e = sys.exc_info()[1] 7001 raise Exception("Operation not supported (non integer UID)") 7002 result = "UID %s:%s" % (pedestal, upper_limit) 7003 elif name == "DATE": 7004 result = "BEFORE %s" % self.convert_date(second) 7005 elif name == "SIZE": 7006 result = "SMALLER %s" % self.expand(second) 7007 else: 7008 raise Exception("Operation not supported") 7009 return result
7010
7011 - def LE(self, first, second):
7012 result = None 7013 name = self.search_fields[first.name] 7014 if name == "MESSAGE": 7015 result = "%s:%s" % (1, self.expand(second)) 7016 elif name == "UID": 7017 try: 7018 pedestal, threshold = self.get_uid_bounds(first.tablename) 7019 except TypeError: 7020 e = sys.exc_info()[1] 7021 LOGGER.debug("Error requesting uid bounds: %s", str(e)) 7022 return "" 7023 upper_limit = int(self.expand(second)) 7024 result = "UID %s:%s" % (pedestal, upper_limit) 7025 elif name == "DATE": 7026 result = "BEFORE %s" % self.convert_date(second, add=datetime.timedelta(1)) 7027 else: 7028 raise Exception("Operation not supported") 7029 return result
7030
7031 - def NE(self, first, second=None):
7032 if (second is None) and isinstance(first, Field): 7033 # All records special table query 7034 if first.type == "id": 7035 return self.GE(first, 1) 7036 result = self.NOT(self.EQ(first, second)) 7037 result = result.replace("NOT NOT", "").strip() 7038 return result
7039
7040 - def EQ(self,first,second):
7041 name = self.search_fields[first.name] 7042 result = None 7043 if name is not None: 7044 if name == "MESSAGE": 7045 # query by message sequence number 7046 result = "%s" % self.expand(second) 7047 elif name == "UID": 7048 result = "UID %s" % self.expand(second) 7049 elif name == "DATE": 7050 result = "ON %s" % self.convert_date(second) 7051 7052 elif name in self.flags.values(): 7053 if second: 7054 result = "%s" % (name.upper()[1:]) 7055 else: 7056 result = "NOT %s" % (name.upper()[1:]) 7057 else: 7058 raise Exception("Operation not supported") 7059 else: 7060 raise Exception("Operation not supported") 7061 return result
7062
7063 - def AND(self, first, second):
7064 result = "%s %s" % (self.expand(first), self.expand(second)) 7065 return result
7066
7067 - def OR(self, first, second):
7068 result = "OR %s %s" % (self.expand(first), self.expand(second)) 7069 return "%s" % result.replace("OR OR", "OR")
7070
7071 - def NOT(self, first):
7072 result = "NOT %s" % self.expand(first) 7073 return result
########################################################################
# end of adapters
########################################################################

# Registry mapping the scheme prefix of a DAL connection URI (the part
# before '://', optionally with a ':driver' suffix) to the adapter class
# that implements it.
ADAPTERS = {
    'sqlite': SQLiteAdapter,
    'spatialite': SpatiaLiteAdapter,
    'sqlite:memory': SQLiteAdapter,
    'spatialite:memory': SpatiaLiteAdapter,
    'mysql': MySQLAdapter,
    'postgres': PostgreSQLAdapter,
    'postgres:psycopg2': PostgreSQLAdapter,
    'postgres:pg8000': PostgreSQLAdapter,
    'postgres2:psycopg2': NewPostgreSQLAdapter,
    'postgres2:pg8000': NewPostgreSQLAdapter,
    'oracle': OracleAdapter,
    'mssql': MSSQLAdapter,
    'mssql2': MSSQL2Adapter,
    'mssql3': MSSQL3Adapter,
    'mssql4' : MSSQL4Adapter,
    'vertica': VerticaAdapter,
    'sybase': SybaseAdapter,
    'db2': DB2Adapter,
    'teradata': TeradataAdapter,
    'informix': InformixAdapter,
    'informix-se': InformixSEAdapter,
    'firebird': FireBirdAdapter,
    'firebird_embedded': FireBirdAdapter,
    'ingres': IngresAdapter,
    'ingresu': IngresUnicodeAdapter,
    'sapdb': SAPDBAdapter,
    'cubrid': CubridAdapter,
    'jdbc:sqlite': JDBCSQLiteAdapter,
    'jdbc:sqlite:memory': JDBCSQLiteAdapter,
    'jdbc:postgres': JDBCPostgreSQLAdapter,
    'gae': GoogleDatastoreAdapter, # discouraged, for backward compatibility
    'google:datastore': GoogleDatastoreAdapter,
    'google:sql': GoogleSQLAdapter,
    'couchdb': CouchDBAdapter,
    'mongodb': MongoDBAdapter,
    'imap': IMAPAdapter
}
def sqlhtml_validators(field):
    """
    Field type validation, using web2py's validators mechanism.

    makes sure the content of a field is in line with the declared
    fieldtype

    :param field: the Field to compute default validators for
    :returns: a validator, a list of validators, or [] when web2py's
        validators module is unavailable or the type is opaque
    """
    db = field.db
    try:
        from gluon import validators
    except ImportError:
        # running outside web2py: no default validation
        return []
    field_type, field_length = field.type, field.length
    if isinstance(field_type, SQLCustomType):
        if hasattr(field_type, 'validator'):
            return field_type.validator
        else:
            # fall back to the wrapped base type
            field_type = field_type.type
    elif not isinstance(field_type,str):
        return []
    requires=[]
    def ff(r,id):
        # format a referenced record via the referenced table's _format
        # (string template or callable); fall back to the raw id
        row=r(id)
        if not row:
            return id
        elif hasattr(r, '_format') and isinstance(r._format,str):
            return r._format % row
        elif hasattr(r, '_format') and callable(r._format):
            return r._format(row)
        else:
            return id
    if field_type in (('string', 'text', 'password')):
        requires.append(validators.IS_LENGTH(field_length))
    elif field_type == 'json':
        requires.append(validators.IS_EMPTY_OR(validators.IS_JSON(native_json=field.db._adapter.native_json)))
    elif field_type == 'double' or field_type == 'float':
        requires.append(validators.IS_FLOAT_IN_RANGE(-1e100, 1e100))
    elif field_type == 'integer':
        requires.append(validators.IS_INT_IN_RANGE(-2**31, 2**31))
    elif field_type == 'bigint':
        requires.append(validators.IS_INT_IN_RANGE(-2**63, 2**63))
    elif field_type.startswith('decimal'):
        requires.append(validators.IS_DECIMAL_IN_RANGE(-10**10, 10**10))
    elif field_type == 'date':
        requires.append(validators.IS_DATE())
    elif field_type == 'time':
        requires.append(validators.IS_TIME())
    elif field_type == 'datetime':
        requires.append(validators.IS_DATETIME())
    elif db and field_type.startswith('reference') and \
            field_type.find('.') < 0 and \
            field_type[10:] in db.tables:
        # 'reference <table>' pointing at a known table
        referenced = db[field_type[10:]]
        def repr_ref(id, row=None, r=referenced, f=ff): return f(r, id)
        field.represent = field.represent or repr_ref
        if hasattr(referenced, '_format') and referenced._format:
            requires = validators.IS_IN_DB(db,referenced._id,
                                           referenced._format)
            if field.unique:
                requires._and = validators.IS_NOT_IN_DB(db,field)
            if field.tablename == field_type[10:]:
                # self-reference must allow empty to break the cycle
                return validators.IS_EMPTY_OR(requires)
            return requires
    elif db and field_type.startswith('list:reference') and \
            field_type.find('.') < 0 and \
            field_type[15:] in db.tables:
        # 'list:reference <table>' pointing at a known table
        referenced = db[field_type[15:]]
        def list_ref_repr(ids, row=None, r=referenced, f=ff):
            if not ids:
                return None
            refs = None
            db, id = r._db, r._id
            if isinstance(db._adapter, GoogleDatastoreAdapter):
                # GAE limits belongs() to 30 items: chunk and merge
                def count(values): return db(id.belongs(values)).select(id)
                rx = range(0, len(ids), 30)
                refs = reduce(lambda a,b:a&b, [count(ids[i:i+30]) for i in rx])
            else:
                refs = db(id.belongs(ids)).select(id)
            return (refs and ', '.join(f(r,x.id) for x in refs) or '')
        field.represent = field.represent or list_ref_repr
        if hasattr(referenced, '_format') and referenced._format:
            requires = validators.IS_IN_DB(db,referenced._id,
                                           referenced._format,multiple=True)
        else:
            requires = validators.IS_IN_DB(db,referenced._id,
                                           multiple=True)
        if field.unique:
            requires._and = validators.IS_NOT_IN_DB(db,field)
        if not field.notnull:
            requires = validators.IS_EMPTY_OR(requires)
        return requires
    elif field_type.startswith('list:'):
        def repr_list(values,row=None): return', '.join(str(v) for v in (values or []))
        field.represent = field.represent or repr_list
    if field.unique:
        requires.insert(0,validators.IS_NOT_IN_DB(db,field))
    # sff = type prefixes whose empty value is meaningful
    # (integer, double, date, time, datetime, decimal, boolean, ...)
    sff = ['in', 'do', 'da', 'ti', 'de', 'bo']
    if field.notnull and not field_type[:2] in sff:
        requires.insert(0, validators.IS_NOT_EMPTY())
    elif not field.notnull and field_type[:2] in sff and requires:
        requires[-1] = validators.IS_EMPTY_OR(requires[-1])
    return requires
def bar_escape(item):
    """Escape '|' characters in *item* by doubling them, for use in
    bar-encoded list storage."""
    return '||'.join(str(item).split('|'))
7224
def bar_encode(items):
    """Serialize *items* into '|item1|item2|...|' form, skipping items
    whose string form is blank; '|' inside an item is doubled."""
    parts = []
    for item in items:
        text = str(item)
        if text.strip():
            # inlined bar_escape: double any literal '|'
            parts.append(text.replace('|', '||'))
    return '|%s|' % '|'.join(parts)
7227
def bar_decode_integer(value):
    """Decode a bar-encoded string (or file-like object) into a list of
    integers (Python 2 longs)."""
    if hasattr(value, 'read') and not hasattr(value, 'split'):
        # file-like blob: read the raw text first
        value = value.read()
    return [long(chunk) for chunk in value.split('|') if chunk.strip()]
7232
def bar_decode_string(value):
    """Decode a bar-encoded string into a list of strings, un-doubling
    escaped '|' characters."""
    inner = value[1:-1]  # strip the enclosing bars
    return [chunk.replace('||', '|')
            for chunk in REGEX_UNPACK.split(inner) if chunk.strip()]
7236
class Row(object):

    """
    a dictionary that lets you do d['a'] as well as d.a
    this is only used to store a Row

    Lookup order for d['table.field'] style keys: the _extra dict first,
    then the nested table Row, then the plain attribute, and finally a
    lazy reference resolver.
    """

    __init__ = lambda self,*args,**kwargs: self.__dict__.update(*args,**kwargs)

    def __getitem__(self, k):
        # key access with fallbacks; see class docstring for the order
        key=str(k)
        _extra = self.__dict__.get('_extra', None)
        if _extra is not None:
            v = _extra.get(key, DEFAULT)
            if v != DEFAULT:
                return v
        m = REGEX_TABLE_DOT_FIELD.match(key)
        if m:
            try:
                # 'table.field' -> look inside the nested table Row
                return ogetattr(self, m.group(1))[m.group(2)]
            except (KeyError,AttributeError,TypeError):
                # fall back to the bare field name
                key = m.group(2)
        try:
            return ogetattr(self, key)
        except (KeyError,AttributeError,TypeError), ae:
            try:
                # last resort: resolve and cache a lazy reference
                self[key] = ogetattr(self,'__get_lazy_reference__')(key)
                return self[key]
            except:
                raise ae

    __setitem__ = lambda self, key, value: setattr(self, str(key), value)

    __delitem__ = object.__delattr__

    __copy__ = lambda self: Row(self)

    __call__ = __getitem__


    def get(self, key, default=None):
        # dict-style get() honoring the same fallback chain as
        # __getitem__
        try:
            return self.__getitem__(key)
        except(KeyError, AttributeError, TypeError):
            return self.__dict__.get(key,default)

    has_key = __contains__ = lambda self, key: key in self.__dict__

    __nonzero__ = lambda self: len(self.__dict__)>0

    update = lambda self, *args, **kwargs:  self.__dict__.update(*args, **kwargs)

    keys = lambda self: self.__dict__.keys()

    items = lambda self: self.__dict__.items()

    values = lambda self: self.__dict__.values()

    __iter__ = lambda self: self.__dict__.__iter__()

    iteritems = lambda self: self.__dict__.iteritems()

    __str__ = __repr__ = lambda self: '<Row %s>' % self.as_dict()

    __int__ = lambda self: object.__getattribute__(self,'id')

    __long__ = lambda self: long(object.__getattribute__(self,'id'))

    __getattr__ = __getitem__

    def __eq__(self,other):
        # rows compare equal when their dict forms are equal;
        # anything without as_dict() is never equal
        try:
            return self.as_dict() == other.as_dict()
        except AttributeError:
            return False

    def __ne__(self,other):
        return not (self == other)

    def __copy__(self):
        return Row(dict(self))

    def as_dict(self, datetime_to_str=False, custom_types=None):
        """Return a plain dict copy, recursing into nested Rows and
        converting References/Decimals; values of non-serializable types
        are dropped. datetime_to_str converts temporal values to
        'YYYY-MM-DD hh:mm:ss' strings."""
        SERIALIZABLE_TYPES = [str, unicode, int, long, float, bool, list, dict]
        if isinstance(custom_types,(list,tuple,set)):
            SERIALIZABLE_TYPES += custom_types
        elif custom_types:
            SERIALIZABLE_TYPES.append(custom_types)
        d = dict(self)
        # iterate over a copy of the keys because entries may be deleted
        for k in copy.copy(d.keys()):
            v=d[k]
            if d[k] is None:
                continue
            elif isinstance(v,Row):
                d[k]=v.as_dict()
            elif isinstance(v,Reference):
                d[k]=long(v)
            elif isinstance(v,decimal.Decimal):
                d[k]=float(v)
            elif isinstance(v, (datetime.date, datetime.datetime, datetime.time)):
                if datetime_to_str:
                    d[k] = v.isoformat().replace('T',' ')[:19]
            elif not isinstance(v,tuple(SERIALIZABLE_TYPES)):
                del d[k]
        return d

    def as_xml(self, row_name="row", colnames=None, indent='  '):
        """Serialize the row to an XML fragment rooted at *row_name*;
        keys that are not valid element names go into <extra> tags."""
        def f(row,field,indent='  '):
            if isinstance(row,Row):
                spc = indent+'  \n'
                items = [f(row[x],x,indent+'  ') for x in row]
                return '%s<%s>\n%s\n%s</%s>' % (
                    indent,
                    field,
                    spc.join(item for item in items if item),
                    indent,
                    field)
            elif not callable(row):
                if REGEX_ALPHANUMERIC.match(field):
                    return '%s<%s>%s</%s>' % (indent,field,row,field)
                else:
                    return '%s<extra name="%s">%s</extra>' % \
                        (indent,field,row)
            else:
                return None
        return f(self, row_name, indent=indent)

    def as_json(self, mode="object", default=None, colnames=None,
                serialize=True, **kwargs):
        """
        serializes the row to a JSON object
        kwargs are passed to .as_dict method
        only "object" mode supported

        serialize = False used by Rows.as_json
        TODO: return array mode with query column order

        mode and colnames are not implemented
        """

        item = self.as_dict(**kwargs)
        if serialize:
            if have_serializers:
                return serializers.json(item,
                                        default=default or
                                        serializers.custom_json)
            elif simplejson:
                return simplejson.dumps(item)
            else:
                raise RuntimeError("missing simplejson")
        else:
            return item
################################################################################
# Everything below should be independent of the specifics of the database
# and should work for RDBMs and some NoSQL databases
################################################################################

class SQLCallableList(list):
    """A list that returns a shallow copy of itself when called, so
    e.g. db.tables() yields a safe-to-mutate snapshot."""
    def __call__(self):
        return copy.copy(self)
7409
def smart_query(fields,text):
    """
    Parse a natural-language-ish search string into a DAL Query.

    :param fields: a Field, Table, or list of Fields/Tables that the
        text may refer to (by field name or 'table.field')
    :param text: expression such as "name contains x and age > 3"
    :returns: the resulting Query (or None for empty text)
    :raises RuntimeError: on unknown fields, bad syntax, or unsupported
        operations
    """
    if not isinstance(fields,(list,tuple)):
        fields = [fields]
    new_fields = []
    for field in fields:
        if isinstance(field,Field):
            new_fields.append(field)
        elif isinstance(field,Table):
            # expand a table into all of its fields
            for ofield in field:
                new_fields.append(ofield)
        else:
            raise RuntimeError("fields must be a list of fields")
    fields = new_fields
    # map both 'fieldname' and 'table.fieldname' to the Field object
    field_map = {}
    for field in fields:
        n = field.name.lower()
        if not n in field_map:
            field_map[n] = field
        n = str(field).lower()
        if not n in field_map:
            field_map[n] = field
    # pull out quoted string constants and replace them with #N tokens
    # so later tokenization cannot split them
    constants = {}
    i = 0
    while True:
        m = REGEX_CONST_STRING.search(text)
        if not m: break
        text = text[:m.start()]+('#%i' % i)+text[m.end():]
        constants[str(i)] = m.group()[1:-1]
        i+=1
    text = re.sub('\s+',' ',text).lower()
    # normalize operators and verbal forms; identity pairs like
    # ('<','<') still matter because the replacement adds surrounding
    # spaces for tokenization
    for a,b in [('&','and'),
                ('|','or'),
                ('~','not'),
                ('==','='),
                ('<','<'),
                ('>','>'),
                ('<=','<='),
                ('>=','>='),
                ('<>','!='),
                ('=<','<='),
                ('=>','>='),
                ('=','='),
                (' less or equal than ','<='),
                (' greater or equal than ','>='),
                (' equal or less than ','<='),
                (' equal or greater than ','>='),
                (' less or equal ','<='),
                (' greater or equal ','>='),
                (' equal or less ','<='),
                (' equal or greater ','>='),
                (' not equal to ','!='),
                (' not equal ','!='),
                (' equal to ','='),
                (' equal ','='),
                (' equals ','='),
                (' less than ','<'),
                (' greater than ','>'),
                (' starts with ','startswith'),
                (' ends with ','endswith'),
                (' not in ' , 'notbelongs'),
                (' in ' , 'belongs'),
                (' is ','=')]:
        if a[0]==' ':
            text = text.replace(' is'+a,' %s ' % b)
        text = text.replace(a,' %s ' % b)
    text = re.sub('\s+',' ',text).lower()
    # re-join comparison operators that got split apart ('<' ' =' -> '<=')
    text = re.sub('(?P<a>[\<\>\!\=])\s+(?P<b>[\<\>\!\=])','\g<a>\g<b>',text)
    # simple state machine: expect field, then operator, then value;
    # 'not', 'and', 'or' adjust negation/combination state
    query = field = neg = op = logic = None
    for item in text.split():
        if field is None:
            if item == 'not':
                neg = True
            elif not neg and not logic and item in ('and','or'):
                logic = item
            elif item in field_map:
                field = field_map[item]
            else:
                raise RuntimeError("Invalid syntax")
        elif not field is None and op is None:
            op = item
        elif not op is None:
            if item.startswith('#'):
                # restore a previously extracted string constant
                if not item[1:] in constants:
                    raise RuntimeError("Invalid syntax")
                value = constants[item[1:]]
            else:
                value = item
                if field.type in ('text', 'string', 'json'):
                    # bare '=' on text fields behaves as a LIKE match
                    if op == '=': op = 'like'
            if op == '=': new_query = field==value
            elif op == '<': new_query = field<value
            elif op == '>': new_query = field>value
            elif op == '<=': new_query = field<=value
            elif op == '>=': new_query = field>=value
            elif op == '!=': new_query = field!=value
            elif op == 'belongs': new_query = field.belongs(value.split(','))
            elif op == 'notbelongs': new_query = ~field.belongs(value.split(','))
            elif field.type in ('text', 'string', 'json'):
                if op == 'contains': new_query = field.contains(value)
                elif op == 'like': new_query = field.like(value)
                elif op == 'startswith': new_query = field.startswith(value)
                elif op == 'endswith': new_query = field.endswith(value)
                else: raise RuntimeError("Invalid operation")
            elif field._db._adapter.dbengine=='google:datastore' and \
                 field.type in ('list:integer', 'list:string', 'list:reference'):
                if op == 'contains': new_query = field.contains(value)
                else: raise RuntimeError("Invalid operation")
            else: raise RuntimeError("Invalid operation")
            if neg: new_query = ~new_query
            if query is None:
                query = new_query
            elif logic == 'and':
                query &= new_query
            elif logic == 'or':
                query |= new_query
            # reset the state machine for the next clause
            field = op = neg = logic = None
    return query
7527
class DAL(object):

    """
    An instance of this class represents a database connection.

    Example::

        db = DAL('sqlite://test.db')

    or

        db = DAL(**{"uri": ..., "tables": [...]...}) # experimental

        db.define_table('tablename', Field('fieldname1'),
                        Field('fieldname2'))
    """
7545 - def __new__(cls, uri='sqlite://dummy.db', *args, **kwargs):
7546 if not hasattr(THREAD_LOCAL,'db_instances'): 7547 THREAD_LOCAL.db_instances = {} 7548 if not hasattr(THREAD_LOCAL,'db_instances_zombie'): 7549 THREAD_LOCAL.db_instances_zombie = {} 7550 if uri == '<zombie>': 7551 db_uid = kwargs['db_uid'] # a zombie must have a db_uid! 7552 if db_uid in THREAD_LOCAL.db_instances: 7553 db_group = THREAD_LOCAL.db_instances[db_uid] 7554 db = db_group[-1] 7555 elif db_uid in THREAD_LOCAL.db_instances_zombie: 7556 db = THREAD_LOCAL.db_instances_zombie[db_uid] 7557 else: 7558 db = super(DAL, cls).__new__(cls) 7559 THREAD_LOCAL.db_instances_zombie[db_uid] = db 7560 else: 7561 db_uid = kwargs.get('db_uid',hashlib_md5(repr(uri)).hexdigest()) 7562 if db_uid in THREAD_LOCAL.db_instances_zombie: 7563 db = THREAD_LOCAL.db_instances_zombie[db_uid] 7564 del THREAD_LOCAL.db_instances_zombie[db_uid] 7565 else: 7566 db = super(DAL, cls).__new__(cls) 7567 db_group = THREAD_LOCAL.db_instances.get(db_uid,[]) 7568 db_group.append(db) 7569 THREAD_LOCAL.db_instances[db_uid] = db_group 7570 db._db_uid = db_uid 7571 return db
7572 7573 @staticmethod
7574 - def set_folder(folder):
7575 """ 7576 # ## this allows gluon to set a folder for this thread 7577 # ## <<<<<<<<< Should go away as new DAL replaces old sql.py 7578 """ 7579 BaseAdapter.set_folder(folder)
7580 7581 @staticmethod
7582 - def get_instances():
7583 """ 7584 Returns a dictionary with uri as key with timings and defined tables 7585 {'sqlite://storage.sqlite': { 7586 'dbstats': [(select auth_user.email from auth_user, 0.02009)], 7587 'dbtables': { 7588 'defined': ['auth_cas', 'auth_event', 'auth_group', 7589 'auth_membership', 'auth_permission', 'auth_user'], 7590 'lazy': '[]' 7591 } 7592 } 7593 } 7594 """ 7595 dbs = getattr(THREAD_LOCAL,'db_instances',{}).items() 7596 infos = {} 7597 for db_uid, db_group in dbs: 7598 for db in db_group: 7599 if not db._uri: 7600 continue 7601 k = hide_password(db._adapter.uri) 7602 infos[k] = dict( 7603 dbstats = [(row[0], row[1]) for row in db._timings], 7604 dbtables = {'defined': sorted( 7605 list(set(db.tables)-set(db._LAZY_TABLES.keys()))), 7606 'lazy': sorted(db._LAZY_TABLES.keys())}) 7607 return infos
7608 7609 @staticmethod
7610 - def distributed_transaction_begin(*instances):
7611 if not instances: 7612 return 7613 thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread()) 7614 keys = ['%s.%i' % (thread_key, i) for (i,db) in instances] 7615 instances = enumerate(instances) 7616 for (i, db) in instances: 7617 if not db._adapter.support_distributed_transaction(): 7618 raise SyntaxError( 7619 'distributed transaction not suported by %s' % db._dbname) 7620 for (i, db) in instances: 7621 db._adapter.distributed_transaction_begin(keys[i])
7622 7623 @staticmethod
7624 - def distributed_transaction_commit(*instances):
7625 if not instances: 7626 return 7627 instances = enumerate(instances) 7628 thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread()) 7629 keys = ['%s.%i' % (thread_key, i) for (i,db) in instances] 7630 for (i, db) in instances: 7631 if not db._adapter.support_distributed_transaction(): 7632 raise SyntaxError( 7633 'distributed transaction not suported by %s' % db._dbanme) 7634 try: 7635 for (i, db) in instances: 7636 db._adapter.prepare(keys[i]) 7637 except: 7638 for (i, db) in instances: 7639 db._adapter.rollback_prepared(keys[i]) 7640 raise RuntimeError('failure to commit distributed transaction') 7641 else: 7642 for (i, db) in instances: 7643 db._adapter.commit_prepared(keys[i]) 7644 return
7645
    def __init__(self, uri=DEFAULT_URI,
                 pool_size=0, folder=None,
                 db_codec='UTF-8', check_reserved=None,
                 migrate=True, fake_migrate=False,
                 migrate_enabled=True, fake_migrate_all=False,
                 decode_credentials=False, driver_args=None,
                 adapter_args=None, attempts=5, auto_import=False,
                 bigint_id=False, debug=False, lazy_tables=False,
                 db_uid=None, do_connect=True,
                 after_connection=None, tables=None):
        """
        Creates a new Database Abstraction Layer instance.

        Keyword arguments:

        :uri: string (or list of fallback strings) with connection info
            (default: 'sqlite://dummy.db').  Experimentally a dict of
            keyword arguments may be passed instead; see db.as_dict().
        :pool_size: how many open connections to pool for this database.
        :folder: where .table migration files are created; set
            automatically within web2py, use an explicit path outside it.
        :db_codec: string encoding of the database (default: 'UTF-8').
        :check_reserved: list of adapter names whose reserved SQL/NOSQL
            keywords table/column names are validated against
            ('common', 'all', '<adaptername>', '<adaptername>_nonreserved').
        :migrate: default migrate behavior for all tables (default True).
        :fake_migrate: default fake_migrate behavior for all tables.
        :migrate_enabled: if False disables ALL migrations.
        :fake_migrate_all: if True fake-migrates ALL tables.
        :attempts: number of connection attempts before giving up (default 5).
        :auto_import: if set, import table definitions from the
            .table files in the databases folder.
        :bigint_id: if set, use bigint instead of int for id/reference fields.
        :lazy_tables: delay table definition until first access.
        :after_connection: callable executed after each connection is made.
        """
        # a zombie instance re-attaches to existing state: nothing to init
        if uri == '<zombie>' and db_uid is not None: return
        if not decode_credentials:
            credential_decoder = lambda cred: cred
        else:
            credential_decoder = lambda cred: urllib.unquote(cred)
        self._folder = folder
        if folder:
            self.set_folder(folder)
        self._uri = uri
        self._pool_size = pool_size
        self._db_codec = db_codec
        self._lastsql = ''
        self._timings = []
        self._pending_references = {}
        self._request_tenant = 'request_tenant'
        self._common_fields = []
        self._referee_name = '%(table)s'
        self._bigint_id = bigint_id
        self._debug = debug
        self._migrated = []
        self._LAZY_TABLES = {}
        self._lazy_tables = lazy_tables
        self._tables = SQLCallableList()
        self._driver_args = driver_args
        self._adapter_args = adapter_args
        self._check_reserved = check_reserved
        self._decode_credentials = decode_credentials
        self._attempts = attempts
        self._do_connect = do_connect

        # sanitize a bogus attempts value back to the default
        if not str(attempts).isdigit() or attempts < 0:
            attempts = 5
        if uri:
            # each attempt cycles through all candidate uris in order
            uris = isinstance(uri,(list,tuple)) and uri or [uri]
            error = ''
            connected = False
            for k in range(attempts):
                for uri in uris:
                    try:
                        if is_jdbc and not uri.startswith('jdbc:'):
                            uri = 'jdbc:'+uri
                        self._dbname = REGEX_DBNAME.match(uri).group()
                        if not self._dbname in ADAPTERS:
                            raise SyntaxError("Error in URI '%s' or database not supported" % self._dbname)
                        # notice that driver args or {} else driver_args
                        # defaults to {} global, not correct
                        kwargs = dict(db=self,uri=uri,
                                      pool_size=pool_size,
                                      folder=folder,
                                      db_codec=db_codec,
                                      credential_decoder=credential_decoder,
                                      driver_args=driver_args or {},
                                      adapter_args=adapter_args or {},
                                      do_connect=do_connect,
                                      after_connection=after_connection)
                        self._adapter = ADAPTERS[self._dbname](**kwargs)
                        types = ADAPTERS[self._dbname].types
                        # copy so multiple DAL() possible
                        self._adapter.types = copy.copy(types)
                        self._adapter.build_parsemap()
                        if bigint_id:
                            if 'big-id' in types and 'reference' in types:
                                self._adapter.types['id'] = types['big-id']
                                self._adapter.types['reference'] = types['big-reference']
                        connected = True
                        break
                    except SyntaxError:
                        # bad uri / unknown adapter: do not retry
                        raise
                    except Exception:
                        tb = traceback.format_exc()
                        LOGGER.debug('DEBUG: connect attempt %i, connection error:\n%s' % (k, tb))
                if connected:
                    break
                else:
                    time.sleep(1)
            if not connected:
                raise RuntimeError("Failure to connect, tried %d times:\n%s" % (attempts, tb))
        else:
            # no uri: dummy adapter, nothing to migrate
            self._adapter = BaseAdapter(db=self,pool_size=0,
                                        uri='None',folder=folder,
                                        db_codec=db_codec, after_connection=after_connection)
            migrate = fake_migrate = False
        adapter = self._adapter
        self._uri_hash = hashlib_md5(adapter.uri).hexdigest()
        self.check_reserved = check_reserved
        if self.check_reserved:
            from reserved_sql_keywords import ADAPTERS as RSK
            self.RSK = RSK
        self._migrate = migrate
        self._fake_migrate = fake_migrate
        self._migrate_enabled = migrate_enabled
        self._fake_migrate_all = fake_migrate_all
        if auto_import or tables:
            self.import_table_definitions(adapter.folder,
                                          tables=tables)
7806 7807 @property
7808 - def tables(self):
7809 return self._tables
7810
    def import_table_definitions(self, path, migrate=False,
                                 fake_migrate=False, tables=None):
        """
        Define tables either from an explicit list of table-definition
        dicts (``tables``) or by loading the pickled ``.table``
        migration files for this connection found under ``path``.
        """
        pattern = pjoin(path,self._uri_hash+'_*.table')
        if tables:
            for table in tables:
                self.define_table(**table)
        else:
            for filename in glob.glob(pattern):
                tfile = self._adapter.file_open(filename, 'r')
                try:
                    sql_fields = pickle.load(tfile)
                    # strip '<uri_hash>_' prefix and '.table' suffix
                    name = filename[len(pattern)-7:-6]
                    # pair each field with its sort key so the original
                    # column order is restored
                    mf = [(value['sortable'],
                           Field(key,
                                 type=value['type'],
                                 length=value.get('length',None),
                                 notnull=value.get('notnull',False),
                                 unique=value.get('unique',False))) \
                              for key, value in sql_fields.iteritems()]
                    mf.sort(lambda a,b: cmp(a[0],b[0]))
                    self.define_table(name,*[item[1] for item in mf],
                                      **dict(migrate=migrate,
                                             fake_migrate=fake_migrate))
                finally:
                    self._adapter.file_close(tfile)
7836
7837 - def check_reserved_keyword(self, name):
7838 """ 7839 Validates ``name`` against SQL keywords 7840 Uses self.check_reserve which is a list of 7841 operators to use. 7842 self.check_reserved 7843 ['common', 'postgres', 'mysql'] 7844 self.check_reserved 7845 ['all'] 7846 """ 7847 for backend in self.check_reserved: 7848 if name.upper() in self.RSK[backend]: 7849 raise SyntaxError( 7850 'invalid table/column name "%s" is a "%s" reserved SQL/NOSQL keyword' % (name, backend.upper()))
7851
    def parse_as_rest(self,patterns,args,vars,queries=None,nested_select=True):
        """
        Match a RESTful request (``args``/``vars``) against a list of
        url ``patterns`` and return a Row with keys status, response,
        pattern and error.

        EXAMPLE:

        db.define_table('person',Field('name'),Field('info'))
        db.define_table('pet',Field('ownedby',db.person),Field('name'),Field('info'))

        @request.restful()
        def index():
            def GET(*args,**vars):
                patterns = [
                    "/friends[person]",
                    "/{person.name}/:field",
                    "/{person.name}/pets[pet.ownedby]",
                    "/{person.name}/pets[pet.ownedby]/{pet.name}",
                    "/{person.name}/pets[pet.ownedby]/{pet.name}/:field",
                    ("/dogs[pet]", db.pet.info=='dog'),
                    ("/dogs[pet]/{pet.name.startswith}", db.pet.info=='dog'),
                    ]
                parser = db.parse_as_rest(patterns,args,vars)
                if parser.status == 200:
                    return dict(content=parser.response)
                else:
                    raise HTTP(parser.status,parser.error)

            def POST(table_name,**vars):
                if table_name == 'person':
                    return db.person.validate_and_insert(**vars)
                elif table_name == 'pet':
                    return db.pet.validate_and_insert(**vars)
                else:
                    raise HTTP(400)
            return locals()
        """

        db = self
        re1 = REGEX_SEARCH_PATTERN
        re2 = REGEX_SQUARE_BRACKETS

        def auto_table(table,base='',depth=0):
            # generate url patterns for every readable field of `table`,
            # recursing into referencing tables up to `depth` levels
            patterns = []
            for field in db[table].fields:
                if base:
                    tag = '%s/%s' % (base,field.replace('_','-'))
                else:
                    tag = '/%s/%s' % (table.replace('_','-'),field.replace('_','-'))
                f = db[table][field]
                if not f.readable: continue
                if f.type=='id' or 'slug' in field or f.type.startswith('reference'):
                    tag += '/{%s.%s}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                elif f.type.startswith('boolean'):
                    tag += '/{%s.%s}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                elif f.type in ('float','double','integer','bigint'):
                    # numeric fields are exposed as a [ge, lt) range
                    tag += '/{%s.%s.ge}/{%s.%s.lt}' % (table,field,table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                elif f.type.startswith('list:'):
                    tag += '/{%s.%s.contains}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                elif f.type in ('date','datetime'):
                    tag+= '/{%s.%s.year}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                    tag+='/{%s.%s.month}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                    tag+='/{%s.%s.day}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                if f.type in ('datetime','time'):
                    tag+= '/{%s.%s.hour}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                    tag+='/{%s.%s.minute}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                    tag+='/{%s.%s.second}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                if depth>0:
                    for f in db[table]._referenced_by:
                        tag+='/%s[%s.%s]' % (table,f.tablename,f.name)
                        patterns.append(tag)
                        patterns += auto_table(table,base=tag,depth=depth-1)
            return patterns

        if patterns == 'auto':
            # build patterns for every non-auth table
            patterns=[]
            for table in db.tables:
                if not table.startswith('auth_'):
                    patterns.append('/%s[%s]' % (table,table))
                    patterns += auto_table(table,base='',depth=1)
        else:
            # expand any trailing ':auto[table]' token in-place
            i = 0
            while i<len(patterns):
                pattern = patterns[i]
                if not isinstance(pattern,str):
                    pattern = pattern[0]
                tokens = pattern.split('/')
                if tokens[-1].startswith(':auto') and re2.match(tokens[-1]):
                    new_patterns = auto_table(tokens[-1][tokens[-1].find('[')+1:-1],
                                              '/'.join(tokens[:-1]))
                    patterns = patterns[:i]+new_patterns+patterns[i+1:]
                    i += len(new_patterns)
                else:
                    i += 1
        if '/'.join(args) == 'patterns':
            # introspection request: list the available patterns
            return Row({'status':200,'pattern':'list',
                        'error':None,'response':patterns})
        for pattern in patterns:
            basequery, exposedfields = None, []
            if isinstance(pattern,tuple):
                if len(pattern)==2:
                    pattern, basequery = pattern
                elif len(pattern)>2:
                    pattern, basequery, exposedfields = pattern[0:3]
            otable=table=None
            if not isinstance(queries,dict):
                dbset=db(queries)
                if basequery is not None:
                    dbset = dbset(basequery)
            i=0
            tags = pattern[1:].split('/')
            if len(tags)!=len(args):
                continue
            for tag in tags:
                if re1.match(tag):
                    # '{table.field[.op[.not]]}' token: add a filter
                    tokens = tag[1:-1].split('.')
                    table, field = tokens[0], tokens[1]
                    if not otable or table == otable:
                        if len(tokens)==2 or tokens[2]=='eq':
                            query = db[table][field]==args[i]
                        elif tokens[2]=='ne':
                            query = db[table][field]!=args[i]
                        elif tokens[2]=='lt':
                            query = db[table][field]<args[i]
                        elif tokens[2]=='gt':
                            query = db[table][field]>args[i]
                        elif tokens[2]=='ge':
                            query = db[table][field]>=args[i]
                        elif tokens[2]=='le':
                            query = db[table][field]<=args[i]
                        elif tokens[2]=='year':
                            query = db[table][field].year()==args[i]
                        elif tokens[2]=='month':
                            query = db[table][field].month()==args[i]
                        elif tokens[2]=='day':
                            query = db[table][field].day()==args[i]
                        elif tokens[2]=='hour':
                            query = db[table][field].hour()==args[i]
                        elif tokens[2]=='minute':
                            query = db[table][field].minutes()==args[i]
                        elif tokens[2]=='second':
                            query = db[table][field].seconds()==args[i]
                        elif tokens[2]=='startswith':
                            query = db[table][field].startswith(args[i])
                        elif tokens[2]=='contains':
                            query = db[table][field].contains(args[i])
                        else:
                            raise RuntimeError("invalid pattern: %s" % pattern)
                        if len(tokens)==4 and tokens[3]=='not':
                            query = ~query
                        elif len(tokens)>=4:
                            raise RuntimeError("invalid pattern: %s" % pattern)
                        if not otable and isinstance(queries,dict):
                            dbset = db(queries[table])
                            if basequery is not None:
                                dbset = dbset(basequery)
                        dbset=dbset(query)
                    else:
                        raise RuntimeError("missing relation in pattern: %s" % pattern)
                elif re2.match(tag) and args[i]==tag[:tag.find('[')]:
                    # 'name[table.field]' token: hop to a related table
                    ref = tag[tag.find('[')+1:-1]
                    if '.' in ref and otable:
                        table,field = ref.split('.')
                        selfld = '_id'
                        if db[table][field].type.startswith('reference '):
                            refs = [ x.name for x in db[otable] if x.type == db[table][field].type ]
                        else:
                            refs = [ x.name for x in db[table]._referenced_by if x.tablename==otable ]
                        if refs:
                            selfld = refs[0]
                        if nested_select:
                            try:
                                dbset=db(db[table][field].belongs(dbset._select(db[otable][selfld])))
                            except ValueError:
                                return Row({'status':400,'pattern':pattern,
                                            'error':'invalid path','response':None})
                        else:
                            items = [item.id for item in dbset.select(db[otable][selfld])]
                            dbset=db(db[table][field].belongs(items))
                    else:
                        table = ref
                        if not otable and isinstance(queries,dict):
                            dbset = db(queries[table])
                        dbset=dbset(db[table])
                elif tag==':field' and table:
                    # ':field' token: return a single column of the set
                    field = args[i]
                    if not field in db[table]: break
                    # hand-built patterns should respect .readable=False as well
                    if not db[table][field].readable:
                        return Row({'status':418,'pattern':pattern,
                                    'error':'I\'m a teapot','response':None})
                    try:
                        distinct = vars.get('distinct', False) == 'True'
                        offset = long(vars.get('offset',None) or 0)
                        limits = (offset,long(vars.get('limit',None) or 1000)+offset)
                    except ValueError:
                        return Row({'status':400,'error':'invalid limits','response':None})
                    items = dbset.select(db[table][field], distinct=distinct, limitby=limits)
                    if items:
                        return Row({'status':200,'response':items,
                                    'pattern':pattern})
                    else:
                        return Row({'status':404,'pattern':pattern,
                                    'error':'no record found','response':None})
                elif tag != args[i]:
                    # literal token mismatch: try the next pattern
                    break
                otable = table
                i += 1
            if i==len(tags) and table:
                # whole pattern matched: select the records
                ofields = vars.get('order',db[table]._id.name).split('|')
                try:
                    orderby = [db[table][f] if not f.startswith('~') else ~db[table][f[1:]] for f in ofields]
                except (KeyError, AttributeError):
                    return Row({'status':400,'error':'invalid orderby','response':None})
                if exposedfields:
                    fields = [field for field in db[table] if str(field).split('.')[-1] in exposedfields and field.readable]
                else:
                    fields = [field for field in db[table] if field.readable]
                count = dbset.count()
                try:
                    offset = long(vars.get('offset',None) or 0)
                    limits = (offset,long(vars.get('limit',None) or 1000)+offset)
                except ValueError:
                    return Row({'status':400,'error':'invalid limits','response':None})
                if count > limits[1]-limits[0]:
                    return Row({'status':400,'error':'too many records','response':None})
                try:
                    response = dbset.select(limitby=limits,orderby=orderby,*fields)
                except ValueError:
                    return Row({'status':400,'pattern':pattern,
                                'error':'invalid path','response':None})
                return Row({'status':200,'response':response,
                            'pattern':pattern,'count':count})
        return Row({'status':400,'error':'no matching pattern','response':None})
8105
8106 - def define_table( 8107 self, 8108 tablename, 8109 *fields, 8110 **args 8111 ):
8112 if not fields and 'fields' in args: 8113 fields = args.get('fields',()) 8114 if not isinstance(tablename, str): 8115 if isinstance(tablename, unicode): 8116 try: 8117 tablename = str(tablename) 8118 except UnicodeEncodeError: 8119 raise SyntaxError("invalid unicode table name") 8120 else: 8121 raise SyntaxError("missing table name") 8122 elif hasattr(self,tablename) or tablename in self.tables: 8123 if not args.get('redefine',False): 8124 raise SyntaxError('table already defined: %s' % tablename) 8125 elif tablename.startswith('_') or hasattr(self,tablename) or \ 8126 REGEX_PYTHON_KEYWORDS.match(tablename): 8127 raise SyntaxError('invalid table name: %s' % tablename) 8128 elif self.check_reserved: 8129 self.check_reserved_keyword(tablename) 8130 else: 8131 invalid_args = set(args)-TABLE_ARGS 8132 if invalid_args: 8133 raise SyntaxError('invalid table "%s" attributes: %s' \ 8134 % (tablename,invalid_args)) 8135 if self._lazy_tables and not tablename in self._LAZY_TABLES: 8136 self._LAZY_TABLES[tablename] = (tablename,fields,args) 8137 table = None 8138 else: 8139 table = self.lazy_define_table(tablename,*fields,**args) 8140 if not tablename in self.tables: 8141 self.tables.append(tablename) 8142 return table
8143
8144 - def lazy_define_table( 8145 self, 8146 tablename, 8147 *fields, 8148 **args 8149 ):
8150 args_get = args.get 8151 common_fields = self._common_fields 8152 if common_fields: 8153 fields = list(fields) + list(common_fields) 8154 8155 table_class = args_get('table_class',Table) 8156 table = table_class(self, tablename, *fields, **args) 8157 table._actual = True 8158 self[tablename] = table 8159 # must follow above line to handle self references 8160 table._create_references() 8161 for field in table: 8162 if field.requires == DEFAULT: 8163 field.requires = sqlhtml_validators(field) 8164 8165 migrate = self._migrate_enabled and args_get('migrate',self._migrate) 8166 if migrate and not self._uri in (None,'None') \ 8167 or self._adapter.dbengine=='google:datastore': 8168 fake_migrate = self._fake_migrate_all or \ 8169 args_get('fake_migrate',self._fake_migrate) 8170 polymodel = args_get('polymodel',None) 8171 try: 8172 GLOBAL_LOCKER.acquire() 8173 self._lastsql = self._adapter.create_table( 8174 table,migrate=migrate, 8175 fake_migrate=fake_migrate, 8176 polymodel=polymodel) 8177 finally: 8178 GLOBAL_LOCKER.release() 8179 else: 8180 table._dbt = None 8181 on_define = args_get('on_define',None) 8182 if on_define: on_define(table) 8183 return table
8184
8185 - def as_dict(self, flat=False, sanitize=True):
8186 db_uid = uri = None 8187 if not sanitize: 8188 uri, db_uid = (self._uri, self._db_uid) 8189 db_as_dict = dict(tables=[], uri=uri, db_uid=db_uid, 8190 **dict([(k, getattr(self, "_" + k, None)) 8191 for k in 'pool_size','folder','db_codec', 8192 'check_reserved','migrate','fake_migrate', 8193 'migrate_enabled','fake_migrate_all', 8194 'decode_credentials','driver_args', 8195 'adapter_args', 'attempts', 8196 'bigint_id','debug','lazy_tables', 8197 'do_connect'])) 8198 for table in self: 8199 db_as_dict["tables"].append(table.as_dict(flat=flat, 8200 sanitize=sanitize)) 8201 return db_as_dict
8202
8203 - def as_xml(self, sanitize=True):
8204 if not have_serializers: 8205 raise ImportError("No xml serializers available") 8206 d = self.as_dict(flat=True, sanitize=sanitize) 8207 return serializers.xml(d)
8208
8209 - def as_json(self, sanitize=True):
8210 if not have_serializers: 8211 raise ImportError("No json serializers available") 8212 d = self.as_dict(flat=True, sanitize=sanitize) 8213 return serializers.json(d)
8214
8215 - def as_yaml(self, sanitize=True):
8216 if not have_serializers: 8217 raise ImportError("No YAML serializers available") 8218 d = self.as_dict(flat=True, sanitize=sanitize) 8219 return serializers.yaml(d)
8220
8221 - def __contains__(self, tablename):
8222 try: 8223 return tablename in self.tables 8224 except AttributeError: 8225 # The instance has no .tables attribute yet 8226 return False
8227 8228 has_key = __contains__ 8229
8230 - def get(self,key,default=None):
8231 return self.__dict__.get(key,default)
8232
8233 - def __iter__(self):
8234 for tablename in self.tables: 8235 yield self[tablename]
8236
8237 - def __getitem__(self, key):
8238 return self.__getattr__(str(key))
8239
8240 - def __getattr__(self, key):
8241 if ogetattr(self,'_lazy_tables') and \ 8242 key in ogetattr(self,'_LAZY_TABLES'): 8243 tablename, fields, args = self._LAZY_TABLES.pop(key) 8244 return self.lazy_define_table(tablename,*fields,**args) 8245 return ogetattr(self, key)
8246
8247 - def __setitem__(self, key, value):
8248 osetattr(self, str(key), value)
8249
8250 - def __setattr__(self, key, value):
8251 if key[:1]!='_' and key in self: 8252 raise SyntaxError( 8253 'Object %s exists and cannot be redefined' % key) 8254 osetattr(self,key,value)
8255 8256 __delitem__ = object.__delattr__ 8257
8258 - def __repr__(self):
8259 if hasattr(self,'_uri'): 8260 return '<DAL uri="%s">' % hide_password(self._adapter.uri) 8261 else: 8262 return '<DAL db_uid="%s">' % self._db_uid
8263
8264 - def smart_query(self,fields,text):
8265 return Set(self, smart_query(fields,text))
8266
8267 - def __call__(self, query=None, ignore_common_filters=None):
8268 if isinstance(query,Table): 8269 query = self._adapter.id_query(query) 8270 elif isinstance(query,Field): 8271 query = query!=None 8272 elif isinstance(query, dict): 8273 icf = query.get("ignore_common_filters") 8274 if icf: ignore_common_filters = icf 8275 return Set(self, query, ignore_common_filters=ignore_common_filters)
8276
8277 - def commit(self):
8278 self._adapter.commit()
8279
8280 - def rollback(self):
8281 self._adapter.rollback()
8282
8283 - def close(self):
8284 self._adapter.close() 8285 if self._db_uid in THREAD_LOCAL.db_instances: 8286 db_group = THREAD_LOCAL.db_instances[self._db_uid] 8287 db_group.remove(self) 8288 if not db_group: 8289 del THREAD_LOCAL.db_instances[self._db_uid]
8290
    def executesql(self, query, placeholders=None, as_dict=False,
                   fields=None, colnames=None, as_ordered_dict=False):
        """
        Execute a raw SQL query and return its results.

        :placeholders: optional sequence (or, if the driver supports
            it, dict) of values substituted into the query.
        :as_dict: if True, convert the cursor rows into a list of
            dicts keyed by the column names reported by the driver
            (cursor.description, part of the Python DB-API 2.0 specs) --
            the same shape as .to_list() on a DAL query.
        :as_ordered_dict: like as_dict but preserves the column order
            returned by the select, using OrderedDict.
        :fields: list of DAL Field objects (and/or Table objects, or a
            single Table) matching the columns of the result cursor;
            when given, results are parsed into a DAL Rows object via
            db._adapter.parse().
        :colnames: list of column names in tablename.fieldname format,
            usable instead of (or together with) ``fields``; with both
            given, ``fields`` may also contain Expression objects whose
            matching colnames are arbitrary labels.

        The tables referred to by ``fields``/``colnames`` can be dummy
        tables, but must be defined on this DAL object and listed in
        the same order as the columns in the result cursor.
        """
        adapter = self._adapter
        if placeholders:
            adapter.execute(query, placeholders)
        else:
            adapter.execute(query)
        if as_dict or as_ordered_dict:
            if not hasattr(adapter.cursor,'description'):
                raise RuntimeError("database does not support executesql(...,as_dict=True)")
            # Non-DAL legacy db query, converts cursor results to dict.
            # sequence of 7-item sequences. each sequence tells about a column.
            # first item is always the field name according to Python Database API specs
            columns = adapter.cursor.description
            # reduce the column info down to just the field names
            fields = colnames or [f[0] for f in columns]
            # will hold our finished resultset in a list
            data = adapter._fetchall()
            # convert the list for each row into a dictionary so it's
            # easier to work with. row['field_name'] rather than row[0]
            if as_ordered_dict:
                _dict = OrderedDict
            else:
                _dict = dict
            return [_dict(zip(fields,row)) for row in data]
        try:
            data = adapter._fetchall()
        except:
            # statement produced no result set (e.g. DDL/UPDATE)
            return None
        if fields or colnames:
            fields = [] if fields is None else fields
            if not isinstance(fields, list):
                fields = [fields]
            # flatten any Table objects into their Field objects
            extracted_fields = []
            for field in fields:
                if isinstance(field, Table):
                    extracted_fields.extend([f for f in field])
                else:
                    extracted_fields.append(field)
            if not colnames:
                colnames = ['%s.%s' % (f.tablename, f.name)
                            for f in extracted_fields]
            data = adapter.parse(
                data, fields=extracted_fields, colnames=colnames)
        return data
8388
8389 - def _remove_references_to(self, thistable):
8390 for table in self: 8391 table._referenced_by = [field for field in table._referenced_by 8392 if not field.table==thistable]
8393
8394 - def export_to_csv_file(self, ofile, *args, **kwargs):
8395 step = long(kwargs.get('max_fetch_rows,',500)) 8396 write_colnames = kwargs['write_colnames'] = \ 8397 kwargs.get("write_colnames", True) 8398 for table in self.tables: 8399 ofile.write('TABLE %s\r\n' % table) 8400 query = self._adapter.id_query(self[table]) 8401 nrows = self(query).count() 8402 kwargs['write_colnames'] = write_colnames 8403 for k in range(0,nrows,step): 8404 self(query).select(limitby=(k,k+step)).export_to_csv_file( 8405 ofile, *args, **kwargs) 8406 kwargs['write_colnames'] = False 8407 ofile.write('\r\n\r\n') 8408 ofile.write('END')
8409
    def import_from_csv_file(self, ifile, id_map=None, null='<NULL>',
                             unique='uuid', map_tablenames=None,
                             ignore_missing_tables=False,
                             *args, **kwargs):
        """
        Import a multi-table dump produced by export_to_csv_file.

        ``ifile`` is iterated line by line: a ``TABLE <name>`` header
        selects the target table (optionally renamed via
        ``map_tablenames``), the following CSV lines are handed to that
        table's import_from_csv_file, and ``END`` stops the import.
        Mapping a table name to None, or setting ignore_missing_tables,
        skips that table's data block instead of failing.
        """
        #if id_map is None: id_map={}
        id_offset = {} # only used if id_map is None
        map_tablenames = map_tablenames or {}
        for line in ifile:
            line = line.strip()
            if not line:
                continue
            elif line == 'END':
                return
            # NOTE(review): this check runs on the *unmapped* name, so a
            # dump table absent from self.tables fails here even with
            # ignore_missing_tables=True — confirm intended
            elif not line.startswith('TABLE ') or \
                 not line[6:] in self.tables:
                raise SyntaxError('invalid file format')
            else:
                tablename = line[6:]
                tablename = map_tablenames.get(tablename,tablename)
                if tablename is not None and tablename in self.tables:
                    # delegate the CSV body to the table-level importer
                    self[tablename].import_from_csv_file(
                        ifile, id_map, null, unique, id_offset,
                        *args, **kwargs)
                elif tablename is None or ignore_missing_tables:
                    # skip all non-empty lines
                    for line in ifile:
                        if not line.strip():
                            break
                else:
                    raise RuntimeError("Unable to import table that does not exist.\nTry db.import_from_csv_file(..., map_tablenames={'table':'othertable'},ignore_missing_tables=True)")
8440
def DAL_unpickler(db_uid):
    # recreate a DAL from its uid on unpickle; the '<zombie>' uri skips
    # opening a real database connection
    return DAL('<zombie>',db_uid=db_uid)
8444
def DAL_pickler(db):
    # pickle a DAL as just its uid; DAL_unpickler rebuilds it
    return DAL_unpickler, (db._db_uid,)

# register the reduction with copyreg so DAL instances survive pickling
copyreg.pickle(DAL, DAL_pickler, DAL_unpickler)
class SQLALL(object):
    """
    Wrapper standing for the complete field list of a table.

    Stringifying an instance yields every field of the wrapped table,
    comma separated (each field renders table-qualified via Field.__str__).
    Normally only used internally by the SQL generation layer.
    """

    def __init__(self, table):
        self._table = table

    def __str__(self):
        parts = []
        for field in self._table:
            parts.append(str(field))
        return ', '.join(parts)
8463
# class Reference(int):
class Reference(long):
    """
    An id value that lazily resolves to the referenced record.

    Subclasses ``long`` so it compares and hashes as the raw id;
    attribute/item access fetches the referenced row on first use
    (recursive selects).
    """

    def __allocate(self):
        # lazily fetch the referenced row; only hits the db once
        if not self._record:
            self._record = self._table[long(self)]
        if not self._record:
            raise RuntimeError(
                "Using a recursive select but encountered a broken reference: %s %d"%(self._table, long(self)))

    def __getattr__(self, key, default=None):
        # `default` added (backward compatible) so get() below works
        if key == 'id':
            return long(self)
        if key in self._table:
            self.__allocate()
        if self._record:
            # .get() to deal with case self.update_record()
            return self._record.get(key, default)
        else:
            return default

    def get(self, key, default=None):
        # fixed: __getattr__ previously took no default, so calling
        # get(key, default) raised TypeError
        return self.__getattr__(key, default)

    def __setattr__(self, key, value):
        if key.startswith('_'):
            # private attributes live on the Reference itself
            long.__setattr__(self, key, value)
            return
        self.__allocate()
        self._record[key] = value

    def __getitem__(self, key):
        if key == 'id':
            return long(self)
        self.__allocate()
        return self._record.get(key, None)

    def __setitem__(self,key,value):
        self.__allocate()
        self._record[key] = value
def Reference_unpickler(data):
    # NOTE(review): marshal is not safe on untrusted input; this is only
    # ever fed payloads produced by Reference_pickler below
    return marshal.loads(data)
8507
def Reference_pickler(data):
    # pickle a Reference as its marshalled integer id
    try:
        marshal_dump = marshal.dumps(long(data))
    except AttributeError:
        # fallback when marshal.dumps is unavailable: hand-build the
        # marshal 'int' record ('i' tag + little-endian 32-bit value)
        marshal_dump = 'i%s' % struct.pack('<i', long(data))
    return (Reference_unpickler, (marshal_dump,))

copyreg.pickle(Reference, Reference_pickler, Reference_unpickler)
class MethodAdder(object):
    """
    Decorator factory exposed as ``table.add_method``: attaches the
    decorated function to the table instance as a bound method.

    Usage::

        @table.add_method()            # method named after the function
        @table.add_method.some_name    # method registered as 'some_name'
    """
    def __init__(self,table):
        self.table = table
    def __call__(self):
        return self.register()
    def __getattr__(self,method_name):
        # table.add_method.<name> registers the function under <name>
        return self.register(method_name)
    def register(self,method_name=None):
        def _decorated(f):
            instance = self.table
            import types
            # Python 2 bound-method construction (3-arg MethodType) and
            # f.func_name are py2-only
            method = types.MethodType(f, instance, instance.__class__)
            name = method_name or f.func_name
            setattr(instance, name, method)
            return f
        return _decorated
8533
8534 -class Table(object):
8535 8536 """ 8537 an instance of this class represents a database table 8538 8539 Example:: 8540 8541 db = DAL(...) 8542 db.define_table('users', Field('name')) 8543 db.users.insert(name='me') # print db.users._insert(...) to see SQL 8544 db.users.drop() 8545 """ 8546
    def __init__(
        self,
        db,
        tablename,
        *fields,
        **args
        ):
        """
        Initializes the table and performs checking on the provided fields.

        Each table will have automatically an 'id' field unless one is
        supplied or a 'primarykey' keyword argument marks it as keyed.

        If a field is of type Table, the fields (excluding 'id') from that table
        will be used instead.

        :raises SyntaxError: when a supplied field is of incorrect type.
        """
        self._actual = False # set to True by define_table()
        self._tablename = tablename
        self._ot = None # args.get('rname')
        self._rname = args.get('rname')
        if not self._rname:
            self._sequence_name = args.get('sequence_name') or \
                db and db._adapter.sequence_name(tablename)
        else:
            # rname is quoted; strip the quoting characters for the sequence
            tb = self._rname[1:-1]
            self._sequence_name = args.get('sequence_name') or \
                db and db._adapter.sequence_name(tb)
        self._trigger_name = args.get('trigger_name') or \
            db and db._adapter.trigger_name(tablename)
        self._common_filter = args.get('common_filter')
        self._format = args.get('format')
        self._singular = args.get(
            'singular',tablename.replace('_',' ').capitalize())
        self._plural = args.get(
            'plural',pluralize(self._singular.lower()).capitalize())
        # horrible but kept for backward compatibility of appadmin:
        if 'primarykey' in args and args['primarykey'] is not None:
            self._primarykey = args.get('primarykey')

        # callback lists run around insert/update/delete operations
        self._before_insert = []
        self._before_update = [Set.delete_uploaded_files]
        self._before_delete = [Set.delete_uploaded_files]
        self._after_insert = []
        self._after_update = []
        self._after_delete = []

        self.add_method = MethodAdder(self)

        fieldnames,newfields=set(),[]
        _primarykey = getattr(self, '_primarykey', None)
        if _primarykey is not None:
            # keyed table: no automatic id; a single-column primary key
            # doubles as _id
            if not isinstance(_primarykey, list):
                raise SyntaxError(
                    "primarykey must be a list of fields from table '%s'" \
                    % tablename)
            if len(_primarykey)==1:
                self._id = [f for f in fields if isinstance(f,Field) \
                                and f.name==_primarykey[0]][0]
        elif not [f for f in fields if (isinstance(f,Field) and
                  f.type=='id') or (isinstance(f, dict) and
                  f.get("type", None)=="id")]:
            # no explicit id field supplied: create the implicit one
            field = Field('id', 'id')
            newfields.append(field)
            fieldnames.add('id')
            self._id = field
        virtual_fields = []
        def include_new(field):
            # accept a field, tracking names and the id field
            newfields.append(field)
            fieldnames.add(field.name)
            if field.type=='id':
                self._id = field
        for field in fields:
            if isinstance(field, (FieldMethod, FieldVirtual)):
                virtual_fields.append(field)
            elif isinstance(field, Field) and not field.name in fieldnames:
                # copy fields already bound to another db so this table
                # gets its own instance
                if field.db is not None:
                    field = copy.copy(field)
                include_new(field)
            elif isinstance(field, dict) and not field['fieldname'] in fieldnames:
                include_new(Field(**field))
            elif isinstance(field, Table):
                # inherit all non-id fields from another table definition
                table = field
                for field in table:
                    if not field.name in fieldnames and not field.type=='id':
                        t2 = not table._actual and self._tablename
                        include_new(field.clone(point_self_references_to=t2))
            elif not isinstance(field, (Field, Table)):
                raise SyntaxError(
                    'define_table argument is not a Field or Table: %s' % field)
        fields = newfields
        self._db = db
        tablename = tablename
        self._fields = SQLCallableList()
        self.virtualfields = []
        fields = list(fields)

        if db and db._adapter.uploads_in_blob==True:
            # adapters storing uploads in the db get a companion blob
            # column per upload field
            uploadfields = [f.name for f in fields if f.type=='blob']
            for field in fields:
                fn = field.uploadfield
                if isinstance(field, Field) and field.type == 'upload'\
                        and fn is True:
                    fn = field.uploadfield = '%s_blob' % field.name
                if isinstance(fn,str) and not fn in uploadfields:
                    fields.append(Field(fn,'blob',default='',
                                        writable=False,readable=False))

        lower_fieldnames = set()
        reserved = dir(Table) + ['fields']
        if (db and db.check_reserved):
            check_reserved = db.check_reserved_keyword
        else:
            def check_reserved(field_name):
                # default check: only reject names clashing with Table's API
                if field_name in reserved:
                    raise SyntaxError("field name %s not allowed" % field_name)
        for field in fields:
            field_name = field.name
            check_reserved(field_name)
            # case-insensitive duplicate detection (SQL identifiers)
            fn_lower = field_name.lower()
            if fn_lower in lower_fieldnames:
                raise SyntaxError("duplicate field %s in table %s" \
                                      % (field_name, tablename))
            else:
                lower_fieldnames.add(fn_lower)

            self.fields.append(field_name)
            self[field_name] = field
            if field.type == 'id':
                self['id'] = field
            # bind the field to this table/db
            field.tablename = field._tablename = tablename
            field.table = field._table = self
            field.db = field._db = db
        self.ALL = SQLALL(self)

        if _primarykey is not None:
            for k in _primarykey:
                if k not in self.fields:
                    raise SyntaxError(
                        "primarykey must be a list of fields from table '%s " % tablename)
                else:
                    self[k].notnull = True
        for field in virtual_fields:
            self[field.name] = field

    @property
    def fields(self):
        # read-only view of the field-name list, in definition order
        return self._fields
8695
8696 - def update(self,*args,**kwargs):
8697 raise RuntimeError("Syntax Not Supported")
8698
    def _enable_record_versioning(self,
                                  archive_db=None,
                                  archive_name = '%(tablename)s_archive',
                                  is_active = 'is_active',
                                  current_record = 'current_record',
                                  current_record_label = None):
        """
        Turn on record versioning: define an archive table mirroring this
        one, snapshot rows into it before every update, and (when an
        ``is_active`` field exists) soft-delete by flagging instead of
        removing, filtering inactive rows from all queries.
        """
        db = self._db
        archive_db = archive_db or db
        archive_name = archive_name % dict(tablename=self._tablename)
        if archive_name in archive_db.tables():
            return # do not try define the archive if already exists
        fieldnames = self.fields()
        same_db = archive_db is db
        # cross-db archives cannot hold real references: degrade to bigint
        field_type = self if same_db else 'bigint'
        clones = []
        for field in self:
            nfk = same_db or not field.type.startswith('reference')
            clones.append(field.clone(
                    unique=False, type=field.type if nfk else 'bigint'))
        archive_db.define_table(
            archive_name,
            Field(current_record,field_type,label=current_record_label),
            *clones,**dict(format=self._format))

        # snapshot the old rows before each update
        self._before_update.append(
            lambda qset,fs,db=archive_db,an=archive_name,cn=current_record:
                archive_record(qset,fs,db[an],cn))
        if is_active and is_active in fieldnames:
            # soft delete: flag rather than remove
            self._before_delete.append(
                lambda qset: qset.update(is_active=False))
            # hide inactive rows from every query touching this table
            newquery = lambda query, t=self, name=self._tablename: \
                reduce(AND,[db[tn].is_active == True
                        for tn in db._adapter.tables(query)
                        if tn==name or getattr(db[tn],'_ot',None)==name])
            query = self._common_filter
            if query:
                newquery = query & newquery
            self._common_filter = newquery
8737
    def _validate(self,**vars):
        """Run each named field's validators over its value; return a Row
        mapping field name -> error (empty Row when all values pass)."""
        errors = Row()
        for key,value in vars.iteritems():
            value,error = self[key].validate(value)
            if error:
                errors[key] = error
        return errors
8745
8746 - def _create_references(self):
8747 db = self._db 8748 pr = db._pending_references 8749 self._referenced_by = [] 8750 self._references = [] 8751 for field in self: 8752 fieldname = field.name 8753 field_type = field.type 8754 if isinstance(field_type,str) and field_type[:10] == 'reference ': 8755 ref = field_type[10:].strip() 8756 if not ref: 8757 SyntaxError('Table: reference to nothing: %s' %ref) 8758 if '.' in ref: 8759 rtablename, throw_it,rfieldname = ref.partition('.') 8760 else: 8761 rtablename, rfieldname = ref, None 8762 if not rtablename in db: 8763 pr[rtablename] = pr.get(rtablename,[]) + [field] 8764 continue 8765 rtable = db[rtablename] 8766 if rfieldname: 8767 if not hasattr(rtable,'_primarykey'): 8768 raise SyntaxError( 8769 'keyed tables can only reference other keyed tables (for now)') 8770 if rfieldname not in rtable.fields: 8771 raise SyntaxError( 8772 "invalid field '%s' for referenced table '%s' in table '%s'" \ 8773 % (rfieldname, rtablename, self._tablename)) 8774 rfield = rtable[rfieldname] 8775 else: 8776 rfield = rtable._id 8777 rtable._referenced_by.append(field) 8778 field.referent = rfield 8779 self._references.append(field) 8780 else: 8781 field.referent = None 8782 if self._tablename in pr: 8783 referees = pr.pop(self._tablename) 8784 for referee in referees: 8785 self._referenced_by.append(referee)
8786 8787
8788 - def _filter_fields(self, record, id=False):
8789 return dict([(k, v) for (k, v) in record.iteritems() if k 8790 in self.fields and (self[k].type!='id' or id)])
8791
    def _build_query(self,key):
        """ for keyed table only """
        # AND together <field> == <value> for every primary-key pair in `key`
        query = None
        for k,v in key.iteritems():
            if k in self._primarykey:
                if query:
                    query = query & (self[k] == v)
                else:
                    query = (self[k] == v)
            else:
                raise SyntaxError(
                    'Field %s is not part of the primary key of %s' % \
                    (k,self._tablename))
        return query
8806
    def __getitem__(self, key):
        """
        table[key]: None for a falsy key; for a dict, primary-key lookup on
        a keyed table; for a digit-like key (or GAE Key), record-by-id;
        otherwise plain attribute access (e.g. a field name).
        """
        if not key:
            return None
        elif isinstance(key, dict):
            """ for keyed table """
            query = self._build_query(key)
            return self._db(query).select(limitby=(0,1), orderby_on_limitby=False).first()
        elif str(key).isdigit() or 'google' in DRIVERS and isinstance(key, Key):
            return self._db(self._id == key).select(limitby=(0,1), orderby_on_limitby=False).first()
        elif key:
            return ogetattr(self, str(key))
8818
    def __call__(self, key=DEFAULT, **kwargs):
        """
        table(key) / table(name=value, ...): fetch a single record.

        ``key`` may be a Query, an id, or omitted (then ``kwargs`` become
        equality filters). Special kwargs ``_for_update`` and ``_orderby``
        are forwarded to the select. Returns the Row or None; when both a
        key and kwargs are given, the record must also match the kwargs.
        """
        for_update = kwargs.get('_for_update',False)
        if '_for_update' in kwargs: del kwargs['_for_update']

        orderby = kwargs.get('_orderby',None)
        if '_orderby' in kwargs: del kwargs['_orderby']

        if not key is DEFAULT:
            if isinstance(key, Query):
                record = self._db(key).select(
                    limitby=(0,1),for_update=for_update, orderby=orderby, orderby_on_limitby=False).first()
            elif not str(key).isdigit():
                record = None
            else:
                record = self._db(self._id == key).select(
                    limitby=(0,1),for_update=for_update, orderby=orderby, orderby_on_limitby=False).first()
            if record:
                # verify any extra keyword filters against the found record
                for k,v in kwargs.iteritems():
                    if record[k]!=v: return None
            return record
        elif kwargs:
            query = reduce(lambda a,b:a&b,[self[k]==v for k,v in kwargs.iteritems()])
            return self._db(query).select(limitby=(0,1),for_update=for_update, orderby=orderby, orderby_on_limitby=False).first()
        else:
            return None
8844
    def __setitem__(self, key, value):
        """
        table[key] = value: for dict key/value on a keyed table, insert or
        update by primary key; for a digit-like key, update record by id
        (key 0 means insert); otherwise plain attribute assignment.
        """
        if isinstance(key, dict) and isinstance(value, dict):
            """ option for keyed table """
            if set(key.keys()) == set(self._primarykey):
                value = self._filter_fields(value)
                kv = {}
                kv.update(value)
                kv.update(key)
                # try insert first; fall back to update when it exists
                if not self.insert(**kv):
                    query = self._build_query(key)
                    self._db(query).update(**self._filter_fields(value))
            else:
                raise SyntaxError(
                    'key must have all fields from primary key: %s'%\
                    (self._primarykey))
        elif str(key).isdigit():
            if key == 0:
                self.insert(**self._filter_fields(value))
            elif self._db(self._id == key)\
                    .update(**self._filter_fields(value)) is None:
                raise SyntaxError('No such record: %s' % key)
        else:
            if isinstance(key, dict):
                raise SyntaxError(
                    'value must be a dictionary: %s' % value)
            osetattr(self, str(key), value)

    # attribute access shares the item-access logic above
    __getattr__ = __getitem__
    def __setattr__(self, key, value):
        # non-underscore attributes are field slots: refuse to clobber one
        # that already exists
        if key[:1]!='_' and key in self:
            raise SyntaxError('Object exists and cannot be redefined: %s' % key)
        osetattr(self,key,value)
8878
    def __delitem__(self, key):
        """del table[key]: delete by primary-key dict or by numeric id.

        :raises SyntaxError: when no matching record was deleted.
        """
        if isinstance(key, dict):
            query = self._build_query(key)
            if not self._db(query).delete():
                raise SyntaxError('No such record: %s' % key)
        elif not str(key).isdigit() or \
                not self._db(self._id == key).delete():
            raise SyntaxError('No such record: %s' % key)
8887
    def __contains__(self,key):
        # 'name' in table — true for field names and any other attribute
        return hasattr(self,key)

    # legacy dict-style alias
    has_key = __contains__
    def items(self):
        # note: exposes ALL instance attributes, not just the fields
        return self.__dict__.items()
8895
    def __iter__(self):
        """Iterate over this table's Field objects, in definition order."""
        for fieldname in self.fields:
            yield self[fieldname]
8899
    def iteritems(self):
        # Python 2 iterator over all instance attributes (cf. items())
        return self.__dict__.iteritems()
8902 8903
    def __repr__(self):
        # e.g. <Table person (id,name)>
        return '<Table %s (%s)>' % (self._tablename,','.join(self.fields()))
8906
8907 - def __str__(self):
8908 if self._ot is not None: 8909 ot = self._ot 8910 if 'Oracle' in str(type(self._db._adapter)): 8911 return '%s %s' % (ot, self._tablename) 8912 return '%s AS %s' % (ot, self._tablename) 8913 return self._tablename
8914
    def _drop(self, mode = ''):
        # return the DROP SQL without executing it
        return self._db._adapter._drop(self, mode)

    def drop(self, mode = ''):
        # drop this table in the database (delegates to the adapter)
        return self._db._adapter.drop(self,mode)
8920
    def _listify(self,fields,update=False):
        """
        Normalize an insert/update value dict into a list of
        (Field, value) pairs, applying filter_in, defaults (insert),
        update values (update) and computed fields.

        :raises SyntaxError: unknown field name, or a required computed
            field that cannot be computed
        :raises RuntimeError: missing required field on insert
        """
        new_fields = {} # format: new_fields[name] = (field,value)

        # store all fields passed as input in new_fields
        for name in fields:
            if not name in self.fields:
                if name != 'id':
                    raise SyntaxError(
                        'Field %s does not belong to the table' % name)
            else:
                field = self[name]
                value = fields[name]
                if field.filter_in:
                    value = field.filter_in(value)
                new_fields[name] = (field,value)

        # check all fields that should be in the table but are not passed
        to_compute = []
        for ofield in self:
            name = ofield.name
            if not name in new_fields:
                # if field is supposed to be computed, compute it!
                if ofield.compute: # save those to compute for later
                    to_compute.append((name,ofield))
                # if field is required, check its default value
                elif not update and not ofield.default is None:
                    value = ofield.default
                    fields[name] = value
                    new_fields[name] = (ofield,value)
                # if this is an update, use the update value instead
                elif update and not ofield.update is None:
                    value = ofield.update
                    fields[name] = value
                    new_fields[name] = (ofield,value)
                # if the field is still not there but it should, error
                elif not update and ofield.required:
                    raise RuntimeError(
                        'Table: missing required field: %s' % name)
        # now deal with fields that are supposed to be computed
        if to_compute:
            row = Row(fields)
            for name,ofield in to_compute:
                # try compute it
                try:
                    row[name] = new_value = ofield.compute(row)
                    new_fields[name] = (ofield, new_value)
                except (KeyError, AttributeError):
                    # error silently unless field is required!
                    if ofield.required:
                        raise SyntaxError('unable to compute field: %s' % name)
        return new_fields.values()
8972
    def _attempt_upload(self, fields):
        """Replace file-like values of 'upload' fields with the stored
        file name produced by Field.store()."""
        for field in self:
            if field.type=='upload' and field.name in fields:
                value = fields[field.name]
                if value is not None and not isinstance(value,str):
                    if hasattr(value,'file') and hasattr(value,'filename'):
                        # cgi.FieldStorage-like upload object
                        new_name = field.store(value.file,filename=value.filename)
                    elif hasattr(value,'read') and hasattr(value,'name'):
                        # plain open file object
                        new_name = field.store(value,filename=value.name)
                    else:
                        raise RuntimeError("Unable to handle upload")
                    fields[field.name] = new_name
8985
8986 - def _defaults(self, fields):
8987 "If there are no fields/values specified, return table defaults" 8988 if not fields: 8989 fields = {} 8990 for field in self: 8991 if field.type != "id": 8992 fields[field.name] = field.default 8993 return fields
8994
    def _insert(self, **fields):
        # return the INSERT SQL without executing it
        fields = self._defaults(fields)
        return self._db._adapter._insert(self, self._listify(fields))
8998
    def insert(self, **fields):
        """Insert a record; returns the new id, or 0 when a
        _before_insert callback vetoes the insert."""
        fields = self._defaults(fields)
        self._attempt_upload(fields)
        # any _before_insert callback returning truthy vetoes the insert
        if any(f(fields) for f in self._before_insert): return 0
        ret = self._db._adapter.insert(self, self._listify(fields))
        if ret and self._after_insert:
            fields = Row(fields)
            [f(fields,ret) for f in self._after_insert]
        return ret
9008
    def validate_and_insert(self,**fields):
        """Validate then insert; returns a Row with .id (None on error)
        and .errors (field name -> error message)."""
        response = Row()
        response.errors = Row()
        new_fields = copy.copy(fields)
        for key,value in fields.iteritems():
            value,error = self[key].validate(value)
            if error:
                response.errors[key] = "%s" % error
            else:
                # keep the validator-transformed value
                new_fields[key] = value
        if not response.errors:
            response.id = self.insert(**new_fields)
        else:
            response.id = None
        return response
9024
9025 - def validate_and_update(self, _key=DEFAULT, **fields):
9026 response = Row() 9027 response.errors = Row() 9028 new_fields = copy.copy(fields) 9029 9030 for key,value in fields.iteritems(): 9031 value,error = self[key].validate(value) 9032 if error: 9033 response.errors[key] = "%s" % error 9034 else: 9035 new_fields[key] = value 9036 9037 if _key is DEFAULT: 9038 record = self(**values) 9039 elif isinstance(_key,dict): 9040 record = self(**_key) 9041 else: 9042 record = self(_key) 9043 9044 if not response.errors and record: 9045 row = self._db(self._id==_key) 9046 response.id = row.update(**fields) 9047 else: 9048 response.id = None 9049 return response
9050
    def update_or_insert(self, _key=DEFAULT, **values):
        """Update the record matching ``_key`` (or ``values`` when _key is
        omitted), inserting when absent; returns the new id on insert,
        None on update."""
        if _key is DEFAULT:
            record = self(**values)
        elif isinstance(_key,dict):
            record = self(**_key)
        else:
            record = self(_key)
        if record:
            record.update_record(**values)
            newid = None
        else:
            newid = self.insert(**values)
        return newid
9064
    def bulk_insert(self, items):
        """
        here items is a list of dictionaries
        """
        # listify first (defaults/computed fields resolved), then give
        # _before_insert callbacks a chance to veto the whole batch
        items = [self._listify(item) for item in items]
        if any(f(item) for item in items for f in self._before_insert):return 0
        ret = self._db._adapter.bulk_insert(self,items)
        # fire _after_insert per inserted item when the adapter returns ids
        ret and [[f(item,ret[k]) for k,item in enumerate(items)] for f in self._after_insert]
        return ret
9074
    def _truncate(self, mode = None):
        # return the TRUNCATE SQL without executing it
        return self._db._adapter._truncate(self, mode)

    def truncate(self, mode = None):
        # remove all records from this table (delegates to the adapter)
        return self._db._adapter.truncate(self, mode)
9080
    def import_from_csv_file(
        self,
        csvfile,
        id_map=None,
        null='<NULL>',
        unique='uuid',
        id_offset=None, # id_offset used only when id_map is None
        *args, **kwargs
        ):
        """
        Import records from csv file.
        Column headers must have same names as table fields.
        Field 'id' is ignored.
        If column names read 'table.file' the 'table.' prefix is ignored.
        'unique' argument is a field which must be unique
        (typically a uuid field)
        'restore' argument is default False;
        if set True will remove old values in table first.
        'id_map' if set to None will not map ids.
        The import will keep the id numbers in the restored table.
        This assumes that there is an field of type id that
        is integer and in incrementing order.
        Will keep the id numbers in restored table.
        """

        delimiter = kwargs.get('delimiter', ',')
        quotechar = kwargs.get('quotechar', '"')
        quoting = kwargs.get('quoting', csv.QUOTE_MINIMAL)
        restore = kwargs.get('restore', False)
        if restore:
            self._db[self].truncate()

        reader = csv.reader(csvfile, delimiter=delimiter,
                            quotechar=quotechar, quoting=quoting)
        colnames = None
        if isinstance(id_map, dict):
            if not self._tablename in id_map:
                id_map[self._tablename] = {}
            id_map_self = id_map[self._tablename]

        def fix(field, value, id_map, id_offset):
            # convert one csv cell into the python value for `field`,
            # remapping reference ids through id_map/id_offset
            list_reference_s='list:reference'
            if value == null:
                value = None
            elif field.type=='blob':
                value = base64.b64decode(value)
            elif field.type=='double' or field.type=='float':
                if not value.strip():
                    value = None
                else:
                    value = float(value)
            elif field.type in ('integer','bigint'):
                if not value.strip():
                    value = None
                else:
                    value = long(value)
            elif field.type.startswith('list:string'):
                value = bar_decode_string(value)
            elif field.type.startswith(list_reference_s):
                ref_table = field.type[len(list_reference_s):].strip()
                if id_map is not None:
                    value = [id_map[ref_table][long(v)] \
                             for v in bar_decode_string(value)]
                else:
                    value = [v for v in bar_decode_string(value)]
            elif field.type.startswith('list:'):
                value = bar_decode_integer(value)
            elif id_map and field.type.startswith('reference'):
                try:
                    value = id_map[field.type[9:].strip()][long(value)]
                except KeyError:
                    pass
            elif id_offset and field.type.startswith('reference'):
                try:
                    value = id_offset[field.type[9:].strip()]+long(value)
                except KeyError:
                    pass
            return (field.name, value)

        def is_id(colname):
            # is this column the table's id field?
            if colname in self:
                return self[colname].type == 'id'
            else:
                return False

        first = True
        unique_idx = None
        for lineno, line in enumerate(reader):
            if not line:
                break
            if not colnames:
                # assume this is the first line of the input, contains colnames
                colnames = [x.split('.',1)[-1] for x in line][:len(line)]
                cols, cid = [], None
                for i,colname in enumerate(colnames):
                    if is_id(colname):
                        cid = i
                    elif colname in self.fields:
                        cols.append((i,self[colname]))
                    if colname == unique:
                        unique_idx = i
            else:
                # every other line contains instead data
                items = []
                for i, field in cols:
                    try:
                        items.append(fix(field, line[i], id_map, id_offset))
                    except ValueError:
                        raise RuntimeError("Unable to parse line:%s field:%s value:'%s'"
                                           % (lineno+1,field,line[i]))

                # id-preserving mode: no id_map, but an id column and an
                # id_offset dict are available and no unique column is used
                if not (id_map or cid is None or id_offset is None or unique_idx):
                    csv_id = long(line[cid])
                    curr_id = self.insert(**dict(items))
                    if first:
                        first = False
                        # First curr_id is bigger than csv_id,
                        # then we are not restoring but
                        # extending db table with csv db table
                        id_offset[self._tablename] = (curr_id-csv_id) \
                            if curr_id>csv_id else 0
                    # create new id until we get the same as old_id+offset
                    while curr_id<csv_id+id_offset[self._tablename]:
                        self._db(self._db[self][colnames[cid]] == curr_id).delete()
                        curr_id = self.insert(**dict(items))
                # Validation. Check for duplicate of 'unique' &,
                # if present, update instead of insert.
                elif not unique_idx:
                    new_id = self.insert(**dict(items))
                else:
                    unique_value = line[unique_idx]
                    query = self._db[self][unique] == unique_value
                    record = self._db(query).select().first()
                    if record:
                        record.update_record(**dict(items))
                        new_id = record[self._id.name]
                    else:
                        new_id = self.insert(**dict(items))
                if id_map and cid is not None:
                    id_map_self[long(line[cid])] = new_id
9222 - def as_dict(self, flat=False, sanitize=True):
9223 table_as_dict = dict(tablename=str(self), fields=[], 9224 sequence_name=self._sequence_name, 9225 trigger_name=self._trigger_name, 9226 common_filter=self._common_filter, format=self._format, 9227 singular=self._singular, plural=self._plural) 9228 9229 for field in self: 9230 if (field.readable or field.writable) or (not sanitize): 9231 table_as_dict["fields"].append(field.as_dict( 9232 flat=flat, sanitize=sanitize)) 9233 return table_as_dict
9234
    def as_xml(self, sanitize=True):
        # serialize the table definition to XML (needs gluon serializers)
        if not have_serializers:
            raise ImportError("No xml serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return serializers.xml(d)

    def as_json(self, sanitize=True):
        # serialize the table definition to JSON (needs gluon serializers)
        if not have_serializers:
            raise ImportError("No json serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return serializers.json(d)

    def as_yaml(self, sanitize=True):
        # serialize the table definition to YAML (needs gluon serializers)
        if not have_serializers:
            raise ImportError("No YAML serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return serializers.yaml(d)
9252
    def with_alias(self, alias):
        # return an aliased copy of this table (for self-joins)
        return self._db._adapter.alias(self,alias)

    def on(self, query):
        # build the ON clause expression used in joins: table.on(query)
        return Expression(self._db,self._db._adapter.ON,self,query)
9258
def archive_record(qset,fs,archive_table,current_record):
    """Copy the rows about to be updated into ``archive_table``, storing
    each original id in the ``current_record`` column. Installed by
    Table._enable_record_versioning as a _before_update callback; returns
    False so the update itself is never vetoed."""
    tablenames = qset.db._adapter.tables(qset.query)
    if len(tablenames)!=1: raise RuntimeError("cannot update join")
    # NOTE(review): local `table` is unused — candidate for removal
    table = qset.db[tablenames[0]]
    for row in qset.select():
        fields = archive_table._filter_fields(row)
        fields[current_record] = row.id
        archive_table.insert(**fields)
    return False
9268
9269 9270 9271 -class Expression(object):
9272
    def __init__(
        self,
        db,
        op,
        first=None,
        second=None,
        type=None,
        **optional_args
        ):
        """
        Node of the SQL expression tree: ``op`` is an adapter method and
        ``first``/``second`` its operands; ``type`` defaults to the type
        of ``first`` when it exposes one.
        """
        self.db = db
        self.op = op
        self.first = first
        self.second = second
        self._table = getattr(first,'_table',None)
        ### self._tablename = first._tablename ## CHECK
        if not type and first and hasattr(first,'type'):
            self.type = first.type
        else:
            self.type = type
        self.optional_args = optional_args
9294
9295 - def sum(self):
9296 db = self.db 9297 return Expression(db, db._adapter.AGGREGATE, self, 'SUM', self.type)
9298
9299 - def max(self):
9300 db = self.db 9301 return Expression(db, db._adapter.AGGREGATE, self, 'MAX', self.type)
9302
9303 - def min(self):
9304 db = self.db 9305 return Expression(db, db._adapter.AGGREGATE, self, 'MIN', self.type)
9306
9307 - def len(self):
9308 db = self.db 9309 return Expression(db, db._adapter.LENGTH, self, None, 'integer')
9310
9311 - def avg(self):
9312 db = self.db 9313 return Expression(db, db._adapter.AGGREGATE, self, 'AVG', self.type)
9314
9315 - def abs(self):
9316 db = self.db 9317 return Expression(db, db._adapter.AGGREGATE, self, 'ABS', self.type)
9318
9319 - def lower(self):
9320 db = self.db 9321 return Expression(db, db._adapter.LOWER, self, None, self.type)
9322
9323 - def upper(self):
9324 db = self.db 9325 return Expression(db, db._adapter.UPPER, self, None, self.type)
9326
9327 - def replace(self,a,b):
9328 db = self.db 9329 return Expression(db, db._adapter.REPLACE, self, (a,b), self.type)
9330
9331 - def year(self):
9332 db = self.db 9333 return Expression(db, db._adapter.EXTRACT, self, 'year', 'integer')
9334
9335 - def month(self):
9336 db = self.db 9337 return Expression(db, db._adapter.EXTRACT, self, 'month', 'integer')
9338
9339 - def day(self):
9340 db = self.db 9341 return Expression(db, db._adapter.EXTRACT, self, 'day', 'integer')
9342
9343 - def hour(self):
9344 db = self.db 9345 return Expression(db, db._adapter.EXTRACT, self, 'hour', 'integer')
9346
9347 - def minutes(self):
9348 db = self.db 9349 return Expression(db, db._adapter.EXTRACT, self, 'minute', 'integer')
9350
9351 - def coalesce(self,*others):
9352 db = self.db 9353 return Expression(db, db._adapter.COALESCE, self, others, self.type)
9354
9355 - def coalesce_zero(self):
9356 db = self.db 9357 return Expression(db, db._adapter.COALESCE_ZERO, self, None, self.type)
9358
9359 - def seconds(self):
9360 db = self.db 9361 return Expression(db, db._adapter.EXTRACT, self, 'second', 'integer')
9362
9363 - def epoch(self):
9364 db = self.db 9365 return Expression(db, db._adapter.EPOCH, self, None, 'integer')
9366
    def __getslice__(self, start, stop):
        """
        Map Python slicing to an SQL SUBSTRING expression (Python 2 only:
        __getslice__ is gone in Python 3).

        SQL SUBSTRING is 1-based and takes (position, length); negative
        Python indices are translated into SQL arithmetic on LENGTH(self).
        """
        db = self.db
        if start < 0:
            # counted from the end: LENGTH(self) - (|start| - 1)
            pos0 = '(%s - %d)' % (self.len(), abs(start) - 1)
        else:
            pos0 = start + 1

        if stop < 0:
            length = '(%s - %d - %s)' % (self.len(), abs(stop) - 1, pos0)
        elif stop == sys.maxint:
            # x[start:] arrives as stop == sys.maxint; take the rest
            length = self.len()
        else:
            length = '(%s - %s)' % (stop + 1, pos0)
        return Expression(db,db._adapter.SUBSTRING,
                          self, (pos0, length), self.type)
9382
    def __getitem__(self, i):
        """Return a one-character SUBSTRING expression at position i (delegates to slicing)."""
        return self[i:i + 1]
9385
9386 - def __str__(self):
9387 return self.db._adapter.expand(self,self.type)
9388
9389 - def __or__(self, other): # for use in sortby
9390 db = self.db 9391 return Expression(db,db._adapter.COMMA,self,other,self.type)
9392
9393 - def __invert__(self):
9394 db = self.db 9395 if hasattr(self,'_op') and self.op == db._adapter.INVERT: 9396 return self.first 9397 return Expression(db,db._adapter.INVERT,self,type=self.type)
9398
9399 - def __add__(self, other):
9400 db = self.db 9401 return Expression(db,db._adapter.ADD,self,other,self.type)
9402
    def __sub__(self, other):
        """
        Return an SQL subtraction expression.

        The result type is promoted: integers stay integer, temporal and
        float types become double, decimals keep their precision; anything
        else (e.g. string) is rejected.
        """
        db = self.db
        if self.type in ('integer','bigint'):
            result_type = 'integer'
        elif self.type in ['date','time','datetime','double','float']:
            result_type = 'double'
        elif self.type.startswith('decimal('):
            result_type = self.type
        else:
            raise SyntaxError("subtraction operation not supported for type")
        return Expression(db,db._adapter.SUB,self,other,result_type)
9414
9415 - def __mul__(self, other):
9416 db = self.db 9417 return Expression(db,db._adapter.MUL,self,other,self.type)
9418
9419 - def __div__(self, other):
9420 db = self.db 9421 return Expression(db,db._adapter.DIV,self,other,self.type)
9422
9423 - def __mod__(self, other):
9424 db = self.db 9425 return Expression(db,db._adapter.MOD,self,other,self.type)
9426
9427 - def __eq__(self, value):
9428 db = self.db 9429 return Query(db, db._adapter.EQ, self, value)
9430
9431 - def __ne__(self, value):
9432 db = self.db 9433 return Query(db, db._adapter.NE, self, value)
9434
9435 - def __lt__(self, value):
9436 db = self.db 9437 return Query(db, db._adapter.LT, self, value)
9438
9439 - def __le__(self, value):
9440 db = self.db 9441 return Query(db, db._adapter.LE, self, value)
9442
9443 - def __gt__(self, value):
9444 db = self.db 9445 return Query(db, db._adapter.GT, self, value)
9446
9447 - def __ge__(self, value):
9448 db = self.db 9449 return Query(db, db._adapter.GE, self, value)
9450
9451 - def like(self, value, case_sensitive=False):
9452 db = self.db 9453 op = case_sensitive and db._adapter.LIKE or db._adapter.ILIKE 9454 return Query(db, op, self, value)
9455
9456 - def regexp(self, value):
9457 db = self.db 9458 return Query(db, db._adapter.REGEXP, self, value)
9459
    def belongs(self, *value, **kwattr):
        """
        Return an SQL IN (BELONGS) Query.

        Accepts the following inputs:
           field.belongs(1,2)
           field.belongs((1,2))
           field.belongs(query)

        Does NOT accept:
               field.belongs(1)

        A Query argument becomes a nested SELECT of the query table's id.
        With null=True, a None inside the value set is removed and the
        condition is widened to also match IS NULL rows.
        """
        db = self.db
        if len(value) == 1:
            value = value[0]
        if isinstance(value,Query):
            value = db(value)._select(value.first._table._id)
        elif not isinstance(value, basestring):
            value = set(value)
            if kwattr.get('null') and None in value:
                value.remove(None)
                return (self == None) | Query(db, db._adapter.BELONGS, self, value)
        return Query(db, db._adapter.BELONGS, self, value)
9481
9482 - def startswith(self, value):
9483 db = self.db 9484 if not self.type in ('string', 'text', 'json', 'upload'): 9485 raise SyntaxError("startswith used with incompatible field type") 9486 return Query(db, db._adapter.STARTSWITH, self, value)
9487
9488 - def endswith(self, value):
9489 db = self.db 9490 if not self.type in ('string', 'text', 'json', 'upload'): 9491 raise SyntaxError("endswith used with incompatible field type") 9492 return Query(db, db._adapter.ENDSWITH, self, value)
9493
    def contains(self, value, all=False, case_sensitive=False):
        """
        Return a CONTAINS Query.

        The case_sensitive parameters is only useful for PostgreSQL
        For other RDMBs it is ignored and contains is always case in-sensitive
        For MongoDB and GAE contains is always case sensitive

        A list/tuple value builds one sub-query per non-blank item and
        combines them with AND (all=True) or OR (all=False); an empty
        list degrades to contains('').  Valid on text types and list: types.
        """
        db = self.db
        if isinstance(value,(list, tuple)):
            subqueries = [self.contains(str(v).strip(),case_sensitive=case_sensitive)
                          for v in value if str(v).strip()]
            if not subqueries:
                return self.contains('')
            else:
                return reduce(all and AND or OR,subqueries)
        if not self.type in ('string', 'text', 'json', 'upload') and not self.type.startswith('list:'):
            raise SyntaxError("contains used with incompatible field type")
        return Query(db, db._adapter.CONTAINS, self, value, case_sensitive=case_sensitive)
9511
9512 - def with_alias(self, alias):
9513 db = self.db 9514 return Expression(db, db._adapter.AS, self, alias, self.type)
9515 9516 # GIS expressions 9517
9518 - def st_asgeojson(self, precision=15, options=0, version=1):
9519 return Expression(self.db, self.db._adapter.ST_ASGEOJSON, self, 9520 dict(precision=precision, options=options, 9521 version=version), 'string')
9522
9523 - def st_astext(self):
9524 db = self.db 9525 return Expression(db, db._adapter.ST_ASTEXT, self, type='string')
9526
9527 - def st_x(self):
9528 db = self.db 9529 return Expression(db, db._adapter.ST_X, self, type='string')
9530
9531 - def st_y(self):
9532 db = self.db 9533 return Expression(db, db._adapter.ST_Y, self, type='string')
9534
9535 - def st_distance(self, other):
9536 db = self.db 9537 return Expression(db,db._adapter.ST_DISTANCE,self,other, 'double')
9538
9539 - def st_simplify(self, value):
9540 db = self.db 9541 return Expression(db, db._adapter.ST_SIMPLIFY, self, value, self.type)
9542 9543 # GIS queries 9544
9545 - def st_contains(self, value):
9546 db = self.db 9547 return Query(db, db._adapter.ST_CONTAINS, self, value)
9548
9549 - def st_equals(self, value):
9550 db = self.db 9551 return Query(db, db._adapter.ST_EQUALS, self, value)
9552
9553 - def st_intersects(self, value):
9554 db = self.db 9555 return Query(db, db._adapter.ST_INTERSECTS, self, value)
9556
9557 - def st_overlaps(self, value):
9558 db = self.db 9559 return Query(db, db._adapter.ST_OVERLAPS, self, value)
9560
9561 - def st_touches(self, value):
9562 db = self.db 9563 return Query(db, db._adapter.ST_TOUCHES, self, value)
9564
9565 - def st_within(self, value):
9566 db = self.db 9567 return Query(db, db._adapter.ST_WITHIN, self, value)
9568
# for use in both Query and sortby


class SQLCustomType(object):
    """
    allows defining of custom SQL types

    Example::

        decimal = SQLCustomType(
            type ='double',
            native ='integer',
            encoder =(lambda x: int(float(x) * 100)),
            decoder = (lambda x: Decimal("0.00") + Decimal(str(float(x)/100)) )
            )

        db.define_table(
            'example',
            Field('value', type=decimal)
            )

    :param type: the web2py type (default = 'string')
    :param native: the backend type
    :param encoder: how to encode the value to store it in the backend
    :param decoder: how to decode the value retrieved from the backend
    :param validator: what validators to use ( default = None, will use the
        default validator for type)
    """

    def __init__(
        self,
        type='string',
        native=None,
        encoder=None,
        decoder=None,
        validator=None,
        _class=None,
        ):

        self.type = type
        self.native = native
        # encoder/decoder default to identity so callers can always call them
        self.encoder = encoder or (lambda x: x)
        self.decoder = decoder or (lambda x: x)
        self.validator = validator
        self._class = _class or type

    def startswith(self, text=None):
        """
        True if the underlying web2py type name starts with `text`.

        Fix: the original called self.type.startswith(self, text), passing
        the SQLCustomType instance as the prefix, which always raised
        TypeError and therefore always returned False.
        """
        try:
            return self.type.startswith(text)
        except TypeError:
            # e.g. text is None or self.type is not a string
            return False

    def __getslice__(self, a=0, b=100):
        return None

    def __getitem__(self, i):
        return None

    def __str__(self):
        return self._class
9629
class FieldVirtual(object):
    """
    A computed, read-only pseudo-field attached to a table.

    Accepts either (name, function) or, for backward compatibility, a
    single function argument, in which case the name is 'unknown'.
    """
    def __init__(self, name, f=None, ftype='string',label=None,table_name=None):
        if f:
            self.name, self.f = name, f
        else:
            # legacy call style: FieldVirtual(function)
            self.name, self.f = 'unknown', name
        self.type = ftype
        self.label = label or self.name.capitalize().replace('_',' ')
        self.represent = lambda v,r:v
        self.formatter = IDENTITY
        self.comment = None
        self.readable = True
        self.writable = False
        self.requires = None
        self.widget = None
        self.tablename = table_name
        self.filter_out = None

    def __str__(self):
        return '%s.%s' % (self.tablename, self.name)
9647
class FieldMethod(object):
    """
    A callable pseudo-field (lazy method) attached to a table.

    Accepts either (name, function) or, for backward compatibility, a
    single function argument, in which case the name is 'unknown'.
    """
    def __init__(self, name, f=None, handler=None):
        if f:
            self.name, self.f = name, f
        else:
            # legacy call style: FieldMethod(function)
            self.name, self.f = 'unknown', name
        self.handler = handler
9653
def list_represent(x, r=None):
    """Render a list: value as a comma-separated string; None/empty -> ''."""
    items = x or []
    return ', '.join(str(item) for item in items)
9656
class Field(Expression):

    Virtual = FieldVirtual
    Method = FieldMethod
    Lazy = FieldMethod # for backward compatibility

    """
    an instance of this class represents a database field

    example::

        a = Field(name, 'string', length=32, default=None, required=False,
            requires=IS_NOT_EMPTY(), ondelete='CASCADE',
            notnull=False, unique=False,
            widget=None, label=None, comment=None,
            uploadfield=True, # True means store on disk,
                              # 'a_field_name' means store in this field in db
                              # False means file content will be discarded.
            writable=True, readable=True, update=None, authorize=None,
            autodelete=False, represent=None, uploadfolder=None,
            uploadseparate=False # upload to separate directories by uuid_keys
                                 # first 2 character and tablename.fieldname
                                 # False - old behavior
                                 # True - put uploaded file in
                                 #   <uploaddir>/<tablename>.<fieldname>/uuid_key[:2]
                                 #        directory)
            uploadfs=None # a pyfilesystem where to store upload

    to be used as argument of DAL.define_table

    allowed field types:
    string, boolean, integer, double, text, blob,
    date, time, datetime, upload, password

    """
    def __init__(
        self,
        fieldname,
        type='string',
        length=None,
        default=DEFAULT,
        required=False,
        requires=DEFAULT,
        ondelete='CASCADE',
        notnull=False,
        unique=False,
        uploadfield=True,
        widget=None,
        label=None,
        comment=None,
        writable=True,
        readable=True,
        update=None,
        authorize=None,
        autodelete=False,
        represent=None,
        uploadfolder=None,
        uploadseparate=False,
        uploadfs=None,
        compute=None,
        custom_store=None,
        custom_retrieve=None,
        custom_retrieve_file_properties=None,
        custom_delete=None,
        filter_in = None,
        filter_out = None,
        custom_qualifier = None,
        map_none = None,
        rname = None
        ):
        """
        Validate the field name and store all field attributes.

        Raises SyntaxError for names that are not valid Python/SQL
        identifiers, clash with Table attributes, start with '_' or are
        Python keywords.
        """
        self._db = self.db = None # both for backward compatibility
        # a Field is also an Expression; neutralize the expression slots
        self.op = None
        self.first = None
        self.second = None
        if isinstance(fieldname, unicode):
            try:
                fieldname = str(fieldname)
            except UnicodeEncodeError:
                raise SyntaxError('Field: invalid unicode field name')
        self.name = fieldname = cleanup(fieldname)
        if not isinstance(fieldname, str) or hasattr(Table, fieldname) or \
                fieldname[0] == '_' or REGEX_PYTHON_KEYWORDS.match(fieldname):
            raise SyntaxError('Field: invalid field name: %s' % fieldname)
        # a Table/Field passed as type means "reference <tablename>"
        self.type = type if not isinstance(type, (Table,Field)) else 'reference %s' % type
        self.length = length if not length is None else DEFAULTLENGTH.get(self.type,512)
        # with no explicit default, an `update` value doubles as the default
        self.default = default if default!=DEFAULT else (update or None)
        self.required = required # is this field required
        self.ondelete = ondelete.upper() # this is for reference fields only
        self.notnull = notnull
        self.unique = unique
        self.uploadfield = uploadfield
        self.uploadfolder = uploadfolder
        self.uploadseparate = uploadseparate
        self.uploadfs = uploadfs
        self.widget = widget
        self.comment = comment
        self.writable = writable
        self.readable = readable
        self.update = update
        self.authorize = authorize
        self.autodelete = autodelete
        # list: types get a comma-joining representer by default
        self.represent = list_represent if \
            represent==None and type in ('list:integer','list:string') else represent
        self.compute = compute
        self.isattachment = True
        self.custom_store = custom_store
        self.custom_retrieve = custom_retrieve
        self.custom_retrieve_file_properties = custom_retrieve_file_properties
        self.custom_delete = custom_delete
        self.filter_in = filter_in
        self.filter_out = filter_out
        self.custom_qualifier = custom_qualifier
        self.label = label if label!=None else fieldname.replace('_',' ').title()
        self.requires = requires if requires!=None else []
        self.map_none = map_none
        self._rname = rname
9774
    def set_attributes(self,*args,**attributes):
        """Bulk-update this field's attributes (thin wrapper over __dict__.update)."""
        self.__dict__.update(*args,**attributes)
9777
9778 - def clone(self,point_self_references_to=False,**args):
9779 field = copy.copy(self) 9780 if point_self_references_to and \ 9781 field.type == 'reference %s'+field._tablename: 9782 field.type = 'reference %s' % point_self_references_to 9783 field.__dict__.update(args) 9784 return field
9785
    def store(self, file, filename=None, path=None):
        """
        Store an uploaded file and return the encoded new filename.

        The new name embeds table, field, a uuid key and the base16-encoded
        original name, so retrieve_file_properties() can decode it later.
        Dispatches to custom_store when defined; otherwise stores either in
        a blob Field, on a pyfilesystem (uploadfs), or on disk.
        """
        if self.custom_store:
            return self.custom_store(file,filename,path)
        if isinstance(file, cgi.FieldStorage):
            filename = filename or file.filename
            file = file.file
        elif not filename:
            filename = file.name
        # normalize client path separators, keep only the basename
        filename = os.path.basename(filename.replace('/', os.sep)\
                                        .replace('\\', os.sep))
        m = REGEX_STORE_PATTERN.search(filename)
        extension = m and m.group('e') or 'txt'
        uuid_key = web2py_uuid().replace('-', '')[-16:]
        encoded_filename = base64.b16encode(filename).lower()
        newfilename = '%s.%s.%s.%s' % \
            (self._tablename, self.name, uuid_key, encoded_filename)
        # truncate so the stored name (with extension) fits in the column
        newfilename = newfilename[:(self.length - 1 - len(extension))] + '.' + extension
        self_uploadfield = self.uploadfield
        if isinstance(self_uploadfield,Field):
            # file content goes into a blob field of another table
            blob_uploadfield_name = self_uploadfield.uploadfield
            keys={self_uploadfield.name: newfilename,
                  blob_uploadfield_name: file.read()}
            self_uploadfield.table.insert(**keys)
        elif self_uploadfield == True:
            if path:
                pass
            elif self.uploadfolder:
                path = self.uploadfolder
            elif self.db._adapter.folder:
                path = pjoin(self.db._adapter.folder, '..', 'uploads')
            else:
                raise RuntimeError(
                    "you must specify a Field(...,uploadfolder=...)")
            if self.uploadseparate:
                if self.uploadfs:
                    raise RuntimeError("not supported")
                # shard files into per-field subfolders keyed by uuid prefix
                path = pjoin(path,"%s.%s" %(self._tablename, self.name),
                             uuid_key[:2])
            if not exists(path):
                os.makedirs(path)
            pathfilename = pjoin(path, newfilename)
            if self.uploadfs:
                dest_file = self.uploadfs.open(newfilename, 'wb')
            else:
                dest_file = open(pathfilename, 'wb')
            try:
                shutil.copyfileobj(file, dest_file)
            except IOError:
                raise IOError(
                    'Unable to store file "%s" because invalid permissions, readonly file system, or filename too long' % pathfilename)
            dest_file.close()
        return newfilename
9838
    def retrieve(self, name, path=None, nameonly=False):
        """
        Retrieve a stored upload; returns (filename, stream).

        if nameonly==True return (filename, fullfilename) instead of
        (filename, stream)

        Enforces self.authorize (404 if missing, 403 if denied) when set
        or when the file lives in a db column.
        """
        self_uploadfield = self.uploadfield
        if self.custom_retrieve:
            return self.custom_retrieve(name, path)
        import http
        if self.authorize or isinstance(self_uploadfield, str):
            row = self.db(self == name).select().first()
            if not row:
                raise http.HTTP(404)
            if self.authorize and not self.authorize(row):
                raise http.HTTP(403)
        file_properties = self.retrieve_file_properties(name,path)
        filename = file_properties['filename']
        if isinstance(self_uploadfield, str):  # ## if file is in DB
            stream = StringIO.StringIO(row[self_uploadfield] or '')
        elif isinstance(self_uploadfield,Field):
            # blob stored in another table's field
            blob_uploadfield_name = self_uploadfield.uploadfield
            query = self_uploadfield == name
            data = self_uploadfield.table(query)[blob_uploadfield_name]
            stream = StringIO.StringIO(data)
        elif self.uploadfs:
            # ## if file is on pyfilesystem
            stream = self.uploadfs.open(name, 'rb')
        else:
            # ## if file is on regular filesystem
            # this is intentionally a string with filename and not a stream
            # this propagates and allows stream_file_or_304_or_206 to be called
            fullname = pjoin(file_properties['path'],name)
            if nameonly:
                return (filename, fullname)
            stream = open(fullname,'rb')
        return (filename, stream)
9875
    def retrieve_file_properties(self, name, path=None):
        """
        Decode a stored upload name into dict(path=..., filename=...).

        The stored name was produced by store(): it embeds table, field,
        uuid key and a base16-encoded original filename; path is None for
        db/blob-stored files.
        """
        m = REGEX_UPLOAD_PATTERN.match(name)
        if not m or not self.isattachment:
            raise TypeError('Can\'t retrieve %s file properties' % name)
        self_uploadfield = self.uploadfield
        if self.custom_retrieve_file_properties:
            return self.custom_retrieve_file_properties(name, path)
        if m.group('name'):
            try:
                # recover the original filename; sanitize odd characters
                filename = base64.b16decode(m.group('name'), True)
                filename = REGEX_CLEANUP_FN.sub('_', filename)
            except (TypeError, AttributeError):
                filename = name
        else:
            filename = name
        # ## if file is in DB
        if isinstance(self_uploadfield, (str, Field)):
            return dict(path=None,filename=filename)
        # ## if file is on filesystem
        if not path:
            if self.uploadfolder:
                path = self.uploadfolder
            else:
                path = pjoin(self.db._adapter.folder, '..', 'uploads')
        if self.uploadseparate:
            # mirror the sharded layout used by store()
            t = m.group('table')
            f = m.group('field')
            u = m.group('uuidkey')
            path = pjoin(path,"%s.%s" % (t,f),u[:2])
        return dict(path=path,filename=filename)
9906 9907
    def formatter(self, value):
        """
        Format a value for storage by applying validators' formatters
        in reverse order (undoing validation transforms).
        """
        requires = self.requires
        if value is None or not requires:
            return value or self.map_none
        if not isinstance(requires, (list, tuple)):
            requires = [requires]
        elif isinstance(requires, tuple):
            requires = list(requires)
        else:
            # copy so the reverse() below does not mutate self.requires
            requires = copy.copy(requires)
        requires.reverse()
        for item in requires:
            if hasattr(item, 'formatter'):
                value = item.formatter(value)
        return value
9923
    def validate(self, value):
        """
        Run the field's validators over `value`.

        Returns (value, None) on success or (value, error) on the first
        failing validator; map_none values are normalized to None.
        """
        if not self.requires or self.requires == DEFAULT:
            return ((value if value!=self.map_none else None), None)
        requires = self.requires
        if not isinstance(requires, (list, tuple)):
            requires = [requires]
        for validator in requires:
            (value, error) = validator(value)
            if error:
                return (value, error)
        return ((value if value!=self.map_none else None), None)
9935
9936 - def count(self, distinct=None):
9937 return Expression(self.db, self.db._adapter.COUNT, self, distinct, 'integer')
9938
    def as_dict(self, flat=False, sanitize=True):
        """
        Serialize the field's attributes to a plain dict.

        With sanitize=True a field that is neither readable nor writable
        yields an empty dict.  With flat=True non-serializable values are
        recursively replaced (dates become strings, others become None).
        The 'name' key is renamed to 'fieldname' in the output.
        """
        attrs = ('name', 'authorize', 'represent', 'ondelete',
                 'custom_store', 'autodelete', 'custom_retrieve',
                 'filter_out', 'uploadseparate', 'widget', 'uploadfs',
                 'update', 'custom_delete', 'uploadfield', 'uploadfolder',
                 'custom_qualifier', 'unique', 'writable', 'compute',
                 'map_none', 'default', 'type', 'required', 'readable',
                 'requires', 'comment', 'label', 'length', 'notnull',
                 'custom_retrieve_file_properties', 'filter_in')
        serializable = (int, long, basestring, float, tuple,
                        bool, type(None))

        def flatten(obj):
            # reduce obj to json/xml-safe primitives, dropping the rest
            if isinstance(obj, dict):
                return dict((flatten(k), flatten(v)) for k, v in
                            obj.items())
            elif isinstance(obj, (tuple, list, set)):
                return [flatten(v) for v in obj]
            elif isinstance(obj, serializable):
                return obj
            elif isinstance(obj, (datetime.datetime,
                                  datetime.date, datetime.time)):
                return str(obj)
            else:
                return None

        d = dict()
        if not (sanitize and not (self.readable or self.writable)):
            for attr in attrs:
                if flat:
                    d.update({attr: flatten(getattr(self, attr))})
                else:
                    d.update({attr: getattr(self, attr)})
            d["fieldname"] = d.pop("name")
        return d
9974
9975 - def as_xml(self, sanitize=True):
9976 if have_serializers: 9977 xml = serializers.xml 9978 else: 9979 raise ImportError("No xml serializers available") 9980 d = self.as_dict(flat=True, sanitize=sanitize) 9981 return xml(d)
9982
9983 - def as_json(self, sanitize=True):
9984 if have_serializers: 9985 json = serializers.json 9986 else: 9987 raise ImportError("No json serializers available") 9988 d = self.as_dict(flat=True, sanitize=sanitize) 9989 return json(d)
9990
9991 - def as_yaml(self, sanitize=True):
9992 if have_serializers: 9993 d = self.as_dict(flat=True, sanitize=sanitize) 9994 return serializers.yaml(d) 9995 else: 9996 raise ImportError("No YAML serializers available")
9997
    def __nonzero__(self):
        # a Field is always truthy, even though Expression.__eq__ returns
        # Query objects rather than booleans (Python 2 truth protocol)
        return True
10000
10001 - def __str__(self):
10002 try: 10003 return '%s.%s' % (self.tablename, self.name) 10004 except: 10005 return '<no table>.%s' % self.name
10006
class Query(object):

    """
    a query object necessary to define a set.
    it can be stored or can be passed to DAL.__call__() to obtain a Set

    Example::

        query = db.users.name=='Max'
        set = db(query)
        records = set.select()

    """
    def __init__(
        self,
        db,
        op,
        first=None,
        second=None,
        ignore_common_filters = False,
        **optional_args
        ):
        """
        Build a query node.

        :param db: the DAL instance
        :param op: adapter operator (a bound method of db._adapter)
        :param first: left operand (Field/Expression/Query)
        :param second: right operand, if any
        :param ignore_common_filters: skip table common_filter callbacks
        """
        self.db = self._db = db
        self.op = op
        self.first = first
        self.second = second
        self.ignore_common_filters = ignore_common_filters
        self.optional_args = optional_args
10037
10038 - def __repr__(self):
10039 return '<Query %s>' % BaseAdapter.expand(self.db._adapter,self)
10040
10041 - def __str__(self):
10042 return str(self.db._adapter.expand(self))
10043
10044 - def __and__(self, other):
10045 return Query(self.db,self.db._adapter.AND,self,other)
10046 10047 __rand__ = __and__ 10048
10049 - def __or__(self, other):
10050 return Query(self.db,self.db._adapter.OR,self,other)
10051 10052 __ror__ = __or__ 10053
10054 - def __invert__(self):
10055 if self.op==self.db._adapter.NOT: 10056 return self.first 10057 return Query(self.db,self.db._adapter.NOT,self)
10058
    def __eq__(self, other):
        # two queries are equal if they expand to the same SQL text
        return repr(self) == repr(other)
10061
10062 - def __ne__(self, other):
10063 return not (self == other)
10064
10065 - def case(self,t=1,f=0):
10066 return self.db._adapter.CASE(self,t,f)
10067
    def as_dict(self, flat=False, sanitize=True):
        """Experimental stuff

        This allows to return a plain dictionary with the basic
        query representation. Can be used with json/xml services
        for client-side db I/O

        Example:
        >>> q = db.auth_user.id != 0
        >>> q.as_dict(flat=True)
        {"op": "NE", "first":{"tablename": "auth_user",
                              "fieldname": "id"},
                     "second":0}
        """

        SERIALIZABLE_TYPES = (tuple, dict, set, list, int, long, float,
                              basestring, type(None), bool)
        def loop(d):
            # recursively serialize first/second operands and the op name
            newd = dict()
            for k, v in d.items():
                if k in ("first", "second"):
                    if isinstance(v, self.__class__):
                        newd[k] = loop(v.__dict__)
                    elif isinstance(v, Field):
                        newd[k] = {"tablename": v._tablename,
                                   "fieldname": v.name}
                    elif isinstance(v, Expression):
                        newd[k] = loop(v.__dict__)
                    elif isinstance(v, SERIALIZABLE_TYPES):
                        newd[k] = v
                    elif isinstance(v, (datetime.date,
                                        datetime.time,
                                        datetime.datetime)):
                        newd[k] = unicode(v)
                elif k == "op":
                    # adapter ops are methods; store just their name
                    if callable(v):
                        newd[k] = v.__name__
                    elif isinstance(v, basestring):
                        newd[k] = v
                    else: pass # not callable or string
                elif isinstance(v, SERIALIZABLE_TYPES):
                    if isinstance(v, dict):
                        newd[k] = loop(v)
                    else: newd[k] = v
            return newd

        if flat:
            return loop(self.__dict__)
        else: return self.__dict__
10117 10118
10119 - def as_xml(self, sanitize=True):
10120 if have_serializers: 10121 xml = serializers.xml 10122 else: 10123 raise ImportError("No xml serializers available") 10124 d = self.as_dict(flat=True, sanitize=sanitize) 10125 return xml(d)
10126
10127 - def as_json(self, sanitize=True):
10128 if have_serializers: 10129 json = serializers.json 10130 else: 10131 raise ImportError("No json serializers available") 10132 d = self.as_dict(flat=True, sanitize=sanitize) 10133 return json(d)
10134
def xorify(orderby):
    """Fold a sequence of orderby terms into one via `|`; None if empty."""
    if not orderby:
        return None
    combined = orderby[0]
    for term in orderby[1:]:
        combined = combined | term
    return combined
10142
def use_common_filters(query):
    """True if `query` exists and does not opt out of common filters."""
    return (query and hasattr(query, 'ignore_common_filters')
            and not query.ignore_common_filters)
10146
class Set(object):

    """
    a Set represents a set of records in the database,
    the records are identified by the query=Query(...) object.
    normally the Set is generated by DAL.__call__(Query(...))

    given a set, for example
    set = db(db.users.name=='Max')
    you can:
    set.update(db.users.name='Massimo')
    set.delete() # all elements in the set
    set.select(orderby=db.users.id, groupby=db.users.name, limitby=(0,10))
    and take subsets:
    subset = set(db.users.id<5)
    """
    def __init__(self, db, query, ignore_common_filters = None):
        """
        :param db: the DAL instance
        :param query: a Query, or a dict produced by Query.as_dict(flat=True)
        :param ignore_common_filters: when not None, force the query's
            common-filter behaviour (copying the query so the original
            object is left untouched)
        """
        self.db = db
        self._db = db # for backward compatibility
        self.dquery = None

        # if query is a dict, parse it
        if isinstance(query, dict):
            query = self.parse(query)

        if not ignore_common_filters is None and \
                use_common_filters(query) == ignore_common_filters:
            query = copy.copy(query)
            query.ignore_common_filters = ignore_common_filters
        self.query = query
10178
10179 - def __repr__(self):
10180 return '<Set %s>' % BaseAdapter.expand(self.db._adapter,self.query)
10181
    def __call__(self, query, ignore_common_filters=False):
        """
        Return a subset: a new Set whose query is this set's query ANDed
        with `query`.  Accepts a Query, a Table (expanded to id_query),
        a raw SQL string, or a Field (expanded to field != None).
        """
        if query is None:
            return self
        elif isinstance(query,Table):
            query = self.db._adapter.id_query(query)
        elif isinstance(query,str):
            query = Expression(self.db,query)
        elif isinstance(query,Field):
            query = query!=None
        if self.query:
            return Set(self.db, self.query & query,
                       ignore_common_filters=ignore_common_filters)
        else:
            return Set(self.db, query,
                       ignore_common_filters=ignore_common_filters)
10197
10198 - def _count(self,distinct=None):
10199 return self.db._adapter._count(self.query,distinct)
10200
10201 - def _select(self, *fields, **attributes):
10202 adapter = self.db._adapter 10203 tablenames = adapter.tables(self.query, 10204 attributes.get('join',None), 10205 attributes.get('left',None), 10206 attributes.get('orderby',None), 10207 attributes.get('groupby',None)) 10208 fields = adapter.expand_all(fields, tablenames) 10209 return adapter._select(self.query,fields,attributes)
10210
10211 - def _delete(self):
10212 db = self.db 10213 tablename = db._adapter.get_table(self.query) 10214 return db._adapter._delete(tablename,self.query)
10215
10216 - def _update(self, **update_fields):
10217 db = self.db 10218 tablename = db._adapter.get_table(self.query) 10219 fields = db[tablename]._listify(update_fields,update=True) 10220 return db._adapter._update(tablename,self.query,fields)
10221
    def as_dict(self, flat=False, sanitize=True):
        """
        Serialize the set (its query plus db identification) to a dict.

        With sanitize=True the db uri/name/uid are withheld (None).
        With flat=False the raw __dict__ is returned unchanged.
        """
        if flat:
            uid = dbname = uri = None
            codec = self.db._db_codec
            if not sanitize:
                uri, dbname, uid = (self.db._dbname, str(self.db),
                                    self.db._db_uid)
            d = {"query": self.query.as_dict(flat=flat)}
            d["db"] = {"uid": uid, "codec": codec,
                       "name": dbname, "uri": uri}
            return d
        else: return self.__dict__
10234
10235 - def as_xml(self, sanitize=True):
10236 if have_serializers: 10237 xml = serializers.xml 10238 else: 10239 raise ImportError("No xml serializers available") 10240 d = self.as_dict(flat=True, sanitize=sanitize) 10241 return xml(d)
10242
10243 - def as_json(self, sanitize=True):
10244 if have_serializers: 10245 json = serializers.json 10246 else: 10247 raise ImportError("No json serializers available") 10248 d = self.as_dict(flat=True, sanitize=sanitize) 10249 return json(d)
10250
10251 - def parse(self, dquery):
10252 "Experimental: Turn a dictionary into a Query object" 10253 self.dquery = dquery 10254 return self.build(self.dquery)
10255
    def build(self, d):
        """
        Experimental: see .parse()

        Recursively rebuild a Query/Expression from the dict produced by
        Query.as_dict(flat=True).  AND/OR/NOT recurse; leaf operators are
        dispatched by name against the adapter.
        """
        op, first, second = (d["op"], d["first"],
                             d.get("second", None))
        left = right = built = None

        if op in ("AND", "OR"):
            if not (type(first), type(second)) == (dict, dict):
                raise SyntaxError("Invalid AND/OR query")
            if op == "AND":
                built = self.build(first) & self.build(second)
            else: built = self.build(first) | self.build(second)

        elif op == "NOT":
            if first is None:
                raise SyntaxError("Invalid NOT query")
            built = ~self.build(first)
        else:
            # normal operation (GT, EQ, LT, ...)
            for k, v in {"left": first, "right": second}.items():
                if isinstance(v, dict) and v.get("op"):
                    v = self.build(v)
                if isinstance(v, dict) and ("tablename" in v):
                    # a serialized Field reference: resolve against the db
                    v = self.db[v["tablename"]][v["fieldname"]]
                if k == "left": left = v
                else: right = v

            # NOTE(review): opm stays unbound if the adapter lacks `op`
            # but one of the elif branches below still references it —
            # an unknown op name would raise NameError, not SyntaxError
            if hasattr(self.db._adapter, op):
                opm = getattr(self.db._adapter, op)

            if op == "EQ": built = left == right
            elif op == "NE": built = left != right
            elif op == "GT": built = left > right
            elif op == "GE": built = left >= right
            elif op == "LT": built = left < right
            elif op == "LE": built = left <= right
            elif op in ("JOIN", "LEFT_JOIN", "RANDOM", "ALLOW_NULL"):
                built = Expression(self.db, opm)
            elif op in ("LOWER", "UPPER", "EPOCH", "PRIMARY_KEY",
                        "COALESCE_ZERO", "RAW", "INVERT"):
                built = Expression(self.db, opm, left)
            elif op in ("COUNT", "EXTRACT", "AGGREGATE", "SUBSTRING",
                        "REGEXP", "LIKE", "ILIKE", "STARTSWITH",
                        "ENDSWITH", "ADD", "SUB", "MUL", "DIV",
                        "MOD", "AS", "ON", "COMMA", "NOT_NULL",
                        "COALESCE", "CONTAINS", "BELONGS"):
                built = Expression(self.db, opm, left, right)
            # expression as string
            elif not (left or right): built = Expression(self.db, op)
            else:
                raise SyntaxError("Operator not supported: %s" % op)

        return built
10309
10310 - def isempty(self):
10311 return not self.select(limitby=(0,1), orderby_on_limitby=False)
10312
    def count(self,distinct=None, cache=None):
        """
        Count the records in the set.

        :param distinct: count distinct values of the given field/expression
        :param cache: optional (cache_model, time_expire) tuple; the cache
            key is derived from the db uri plus the COUNT SQL (md5-hashed
            when too long)
        """
        db = self.db
        if cache:
            cache_model, time_expire = cache
            sql = self._count(distinct=distinct)
            key = db._uri + '/' + sql
            if len(key)>200: key = hashlib_md5(key).hexdigest()
            return cache_model(
                key,
                (lambda self=self,distinct=distinct: \
                  db._adapter.count(self.query,distinct)),
                time_expire)
        return db._adapter.count(self.query,distinct)
10326
10327 - def select(self, *fields, **attributes):
10328 adapter = self.db._adapter 10329 tablenames = adapter.tables(self.query, 10330 attributes.get('join',None), 10331 attributes.get('left',None), 10332 attributes.get('orderby',None), 10333 attributes.get('groupby',None)) 10334 fields = adapter.expand_all(fields, tablenames) 10335 return adapter.select(self.query,fields,attributes)
10336
10337 - def nested_select(self,*fields,**attributes):
10338 return Expression(self.db,self._select(*fields,**attributes))
10339
10340 - def delete(self):
10341 db = self.db 10342 tablename = db._adapter.get_table(self.query) 10343 table = db[tablename] 10344 if any(f(self) for f in table._before_delete): return 0 10345 ret = db._adapter.delete(tablename,self.query) 10346 ret and [f(self) for f in table._after_delete] 10347 return ret
10348
10349 - def update(self, **update_fields):
10350 db = self.db 10351 tablename = db._adapter.get_table(self.query) 10352 table = db[tablename] 10353 table._attempt_upload(update_fields) 10354 if any(f(self,update_fields) for f in table._before_update): 10355 return 0 10356 fields = table._listify(update_fields,update=True) 10357 if not fields: 10358 raise SyntaxError("No fields to update") 10359 ret = db._adapter.update("%s" % table,self.query,fields) 10360 ret and [f(self,update_fields) for f in table._after_update] 10361 return ret
10362
10363 - def update_naive(self, **update_fields):
10364 """ 10365 same as update but does not call table._before_update and _after_update 10366 """ 10367 tablename = self.db._adapter.get_table(self.query) 10368 table = self.db[tablename] 10369 fields = table._listify(update_fields,update=True) 10370 if not fields: raise SyntaxError("No fields to update") 10371 10372 ret = self.db._adapter.update("%s" % table,self.query,fields) 10373 return ret
10374
    def validate_and_update(self, **update_fields):
        """
        Validate each value through its field's validators, then update.

        Returns a Row with:
          - errors: Row mapping field name -> validation error (empty if OK)
          - updated: number of updated records, or None when validation failed
        """
        tablename = self.db._adapter.get_table(self.query)
        response = Row()
        response.errors = Row()
        # start from a shallow copy; validated values overwrite the raw ones
        new_fields = copy.copy(update_fields)
        for key,value in update_fields.iteritems():
            value,error = self.db[tablename][key].validate(value)
            if error:
                response.errors[key] = error
            else:
                new_fields[key] = value
        table = self.db[tablename]
        if response.errors:
            # any validation error aborts the update entirely
            response.updated = None
        else:
            # same callback/veto protocol as update()
            if not any(f(self,new_fields) for f in table._before_update):
                fields = table._listify(new_fields,update=True)
                if not fields: raise SyntaxError("No fields to update")
                ret = self.db._adapter.update(tablename,self.query,fields)
                # fire _after_update callbacks only when rows were changed
                ret and [f(self,new_fields) for f in table._after_update]
            else:
                ret = 0
            response.updated = ret
        return response
10399
    def delete_uploaded_files(self, upload_fields=None):
        """
        Remove from disk the files referenced by the set's upload fields.

        Only fields with uploadfield==True (file stored on filesystem, not in
        DB) and autodelete enabled are considered. When *upload_fields* is
        given, only its keys are candidates, and a stored name equal to the
        field's new value in *upload_fields* is kept (it is being reused).

        Always returns False (no fields) or falls through to False after
        unlinking; the return value carries no success information.
        """
        table = self.db[self.db._adapter.tables(self.query)[0]]
        # ## mind uploadfield==True means file is not in DB
        if upload_fields:
            fields = upload_fields.keys()
        else:
            fields = table.fields
        fields = [f for f in fields if table[f].type == 'upload'
                   and table[f].uploadfield == True
                   and table[f].autodelete]
        if not fields:
            return False
        for record in self.select(*[table[f] for f in fields]):
            for fieldname in fields:
                field = table[fieldname]
                oldname = record.get(fieldname, None)
                if not oldname:
                    continue
                # the stored name matches the incoming value: file is reused
                if upload_fields and oldname == upload_fields[fieldname]:
                    continue
                if field.custom_delete:
                    field.custom_delete(oldname)
                else:
                    uploadfolder = field.uploadfolder
                    if not uploadfolder:
                        uploadfolder = pjoin(
                            self.db._adapter.folder, '..', 'uploads')
                    if field.uploadseparate:
                        # uploadseparate layout: <table.field>/<2-char prefix>/file
                        # (oldname is '<table>.<field>.<encodedname>.<ext>')
                        items = oldname.split('.')
                        uploadfolder = pjoin(
                            uploadfolder,
                            "%s.%s" % (items[0], items[1]),
                            items[2][:2])
                    oldpath = pjoin(uploadfolder, oldname)
                    if exists(oldpath):
                        os.unlink(oldpath)
        return False
10437
class RecordUpdater(object):
    """
    Callable bound to a single record (used by Row.update_record): calling
    it updates the record in the database and refreshes the cached column
    set, which is also returned.
    """

    def __init__(self, colset, table, id):
        self.colset = colset
        self.db = table._db
        self.tablename = table._tablename
        self.id = id

    def __call__(self, **fields):
        db, tablename = self.db, self.tablename
        colset, record_id = self.colset, self.id
        table = db[tablename]
        # no explicit fields means rewrite the record from the cached colset
        candidates = fields or dict(colset)
        # drop anything that is not a real table field, and the id itself
        newfields = dict((name, value) for name, value in candidates.items()
                         if name in table.fields and table[name].type != 'id')
        table._db(table._id == record_id,
                  ignore_common_filters=True).update(**newfields)
        colset.update(newfields)
        return colset
10453
class RecordDeleter(object):
    """
    Callable bound to a single record (used by Row.delete_record): calling
    it deletes that record and returns the number of deleted rows.
    """

    def __init__(self, table, id):
        self.db = table._db
        self.tablename = table._tablename
        self.id = id

    def __call__(self):
        table = self.db[self.tablename]
        return self.db(table._id == self.id).delete()
10459
class LazyReferenceGetter(object):
    """
    Resolver for back-references on lazy tables: calling it with another
    table's name returns a LazySet of the rows in that table referencing
    this record. Raises AttributeError when lazy tables are disabled or no
    reference field is found (so attribute lookup falls through normally).
    """

    def __init__(self, table, id):
        self.db = table._db
        self.tablename = table._tablename
        self.id = id

    def __call__(self, other_tablename):
        db = self.db
        if db._lazy_tables is False:
            raise AttributeError()
        table = db[self.tablename]
        other_table = db[other_tablename]
        for rfield in table._referenced_by:
            if rfield.table == other_table:
                return LazySet(rfield, self.id)
        raise AttributeError()
10473
class LazySet(object):
    """
    A Set that is materialized on demand from a single condition:
    reference field == id. Every operation builds the underlying Set
    afresh via _getset() and delegates to it.
    """

    def __init__(self, field, id):
        self.db = field.db
        self.tablename = field._tablename
        self.fieldname = field.name
        self.id = id

    def _getset(self):
        # rebuild the Set on every access so it always reflects current state
        field = self.db[self.tablename][self.fieldname]
        return Set(self.db, field == self.id)

    def __repr__(self):
        return repr(self._getset())

    def __call__(self, query, ignore_common_filters=False):
        return self._getset()(query, ignore_common_filters)

    def _count(self, distinct=None):
        return self._getset()._count(distinct)

    def _select(self, *fields, **attributes):
        return self._getset()._select(*fields, **attributes)

    def _delete(self):
        return self._getset()._delete()

    def _update(self, **update_fields):
        return self._getset()._update(**update_fields)

    def isempty(self):
        return self._getset().isempty()

    def count(self, distinct=None, cache=None):
        return self._getset().count(distinct, cache)

    def select(self, *fields, **attributes):
        return self._getset().select(*fields, **attributes)

    def nested_select(self, *fields, **attributes):
        return self._getset().nested_select(*fields, **attributes)

    def delete(self):
        return self._getset().delete()

    def update(self, **update_fields):
        return self._getset().update(**update_fields)

    def update_naive(self, **update_fields):
        return self._getset().update_naive(**update_fields)

    def validate_and_update(self, **update_fields):
        return self._getset().validate_and_update(**update_fields)

    def delete_uploaded_files(self, upload_fields=None):
        return self._getset().delete_uploaded_files(upload_fields)
10511
class VirtualCommand(object):
    """
    Binds a lazy virtual-field method to a row: calling the instance
    invokes the method with the row as first argument, forwarding any
    extra positional/keyword arguments.
    """

    def __init__(self, method, row):
        self.method = method
        self.row = row

    def __call__(self, *args, **kwargs):
        return self.method(self.row, *args, **kwargs)
10518
def lazy_virtualfield(f):
    """Decorator marking a virtual-field method for lazy (on-call) evaluation."""
    setattr(f, '__lazy__', True)
    return f
10522
class Rows(object):

    """
    A wrapper for the return value of a select. It basically represents a table.
    It has an iterator and each row is represented as a dictionary.
    """

    # ## TODO: this class still needs some work to care for ID/OID

    def __init__(
        self,
        db=None,
        records=[],
        colnames=[],
        compact=True,
        rawrows=None
        ):
        # NOTE(review): mutable default arguments ([]) are shared between
        # calls; callers appear to always pass fresh lists -- confirm.
        self.db = db                # DAL instance the rows came from
        self.records = records      # list of Row objects
        self.colnames = colnames    # 'table.field' column-name strings
        self.compact = compact      # if True, single-table rows are unboxed
        self.response = rawrows     # raw driver response, when retained

    def __repr__(self):
        return '<Rows (%s)>' % len(self.records)

    def setvirtualfields(self,**keyed_virtualfields):
        """
        Attach virtual fields to every record, keyed by table name.

        db.define_table('x',Field('number','integer'))
        if db(db.x).isempty(): [db.x.insert(number=i) for i in range(10)]

        from gluon.dal import lazy_virtualfield

        class MyVirtualFields(object):
            # normal virtual field (backward compatible, discouraged)
            def normal_shift(self): return self.x.number+1
            # lazy virtual field (because of @staticmethod)
            @lazy_virtualfield
            def lazy_shift(instance,row,delta=4): return row.x.number+delta
        db.x.virtualfields.append(MyVirtualFields())

        for row in db(db.x).select():
            print row.number, row.normal_shift, row.lazy_shift(delta=7)
        """
        if not keyed_virtualfields:
            return self
        for row in self.records:
            for (tablename,virtualfields) in keyed_virtualfields.iteritems():
                attributes = dir(virtualfields)
                if not tablename in row:
                    box = row[tablename] = Row()
                else:
                    box = row[tablename]
                updated = False
                for attribute in attributes:
                    if attribute[0] != '_':
                        method = getattr(virtualfields,attribute)
                        # lazy fields become callables bound to the row
                        if hasattr(method,'__lazy__'):
                            box[attribute]=VirtualCommand(method,row)
                        elif type(method)==types.MethodType:
                            # legacy style: copy row data onto the instance
                            # once, then call the method with no arguments
                            if not updated:
                                virtualfields.__dict__.update(row)
                                updated = True
                            box[attribute]=method()
        return self

    def __and__(self,other):
        # concatenation of two compatible Rows (duplicates kept)
        if self.colnames!=other.colnames:
            raise Exception('Cannot & incompatible Rows objects')
        records = self.records+other.records
        return Rows(self.db,records,self.colnames)

    def __or__(self,other):
        # union of two compatible Rows (records already present are skipped)
        if self.colnames!=other.colnames:
            raise Exception('Cannot | incompatible Rows objects')
        records = [record for record in other.records
                   if not record in self.records]
        records = self.records + records
        return Rows(self.db,records,self.colnames)

    def __nonzero__(self):
        # truthiness: non-empty record list (Python 2 protocol)
        if len(self.records):
            return 1
        return 0

    def __len__(self):
        return len(self.records)

    def __getslice__(self, a, b):
        # slicing preserves the compact flag (Python 2 protocol)
        return Rows(self.db,self.records[a:b],self.colnames,compact=self.compact)

    def __getitem__(self, i):
        row = self.records[i]
        keys = row.keys()
        # in compact mode a single-table row is unboxed to its inner Row
        if self.compact and len(keys) == 1 and keys[0] != '_extra':
            return row[row.keys()[0]]
        return row

    def __iter__(self):
        """
        iterator over records
        """

        for i in xrange(len(self)):
            yield self[i]

    def __str__(self):
        """
        serializes the table into a csv file
        """

        s = StringIO.StringIO()
        self.export_to_csv_file(s)
        return s.getvalue()

    def first(self):
        # first record, or None when empty
        if not self.records:
            return None
        return self[0]

    def last(self):
        # last record, or None when empty
        if not self.records:
            return None
        return self[-1]

    def find(self,f,limitby=None):
        """
        returns a new Rows object, a subset of the original object,
        filtered by the function f
        """
        if not self:
            return Rows(self.db, [], self.colnames)
        records = []
        if limitby:
            a,b = limitby
        else:
            a,b = 0,len(self)
        k = 0
        # k counts matches; only those inside [a, b) are collected
        for row in self:
            if f(row):
                if a<=k: records.append(row)
                k += 1
                if k==b: break
        return Rows(self.db, records, self.colnames)

    def exclude(self, f):
        """
        removes elements from the calling Rows object, filtered by the function f,
        and returns a new Rows object containing the removed elements
        """
        if not self.records:
            return Rows(self.db, [], self.colnames)
        removed = []
        i=0
        # manual index loop because self.records is mutated while scanning
        while i<len(self):
            row = self[i]
            if f(row):
                removed.append(self.records[i])
                del self.records[i]
            else:
                i += 1
        return Rows(self.db, removed, self.colnames)

    def sort(self, f, reverse=False):
        """
        returns a list of sorted elements (not sorted in place)
        """
        rows = Rows(self.db,[],self.colnames,compact=False)
        rows.records = sorted(self,key=f,reverse=reverse)
        return rows

    def group_by_value(self, *fields, **args):
        """
        regroups the rows, by one of the fields
        """
        one_result = False
        if 'one_result' in args:
            one_result = args['one_result']

        def build_fields_struct(row, fields, num, groups):
            ''' helper function: recursively nest rows under the values of
            fields[num:], mutating *groups* in place
            '''
            if num > len(fields)-1:
                if one_result:
                    return row
                else:
                    return [row]

            key = fields[num]
            value = row[key]

            if value not in groups:
                groups[value] = build_fields_struct(row, fields, num+1, {})
            else:
                struct = build_fields_struct(row, fields, num+1, groups[ value ])

                # still have more grouping to do
                # NOTE(review): .update() with no arguments is a no-op;
                # possibly intended groups[value].update(struct) -- confirm.
                if type(struct) == type(dict()):
                    groups[value].update()
                # no more grouping, first only is off
                elif type(struct) == type(list()):
                    groups[value] += struct
                # no more grouping, first only on
                else:
                    groups[value] = struct

            return groups

        if len(fields) == 0:
            return self

        # if select returned no results
        if not self.records:
            return {}

        grouped_row_group = dict()

        # build the struct
        for row in self:
            build_fields_struct(row, fields, 0, grouped_row_group)

        return grouped_row_group

    def render(self, i=None, fields=None):
        """
        Takes an index and returns a copy of the indexed row with values
        transformed via the "represent" attributes of the associated fields.

        If no index is specified, a generator is returned for iteration
        over all the rows.

        fields -- a list of fields to transform (if None, all fields with
        "represent" attributes will be transformed).
        """


        if i is None:
            return (self.render(i, fields=fields) for i in range(len(self)))
        import sqlhtml
        # deep copy so represent() never mutates the stored record
        row = copy.deepcopy(self.records[i])
        keys = row.keys()
        tables = [f.tablename for f in fields] if fields \
            else [k for k in keys if k != '_extra']
        for table in tables:
            repr_fields = [f.name for f in fields if f.tablename == table] \
                if fields else [k for k in row[table].keys()
                                if (hasattr(self.db[table], k) and
                                    isinstance(self.db[table][k], Field)
                                    and self.db[table][k].represent)]
            for field in repr_fields:
                row[table][field] = sqlhtml.represent(
                    self.db[table][field], row[table][field], row[table])
        # mirror __getitem__'s compact unboxing
        if self.compact and len(keys) == 1 and keys[0] != '_extra':
            return row[keys[0]]
        return row

    def as_list(self,
                compact=True,
                storage_to_dict=True,
                datetime_to_str=False,
                custom_types=None):
        """
        returns the data as a list or dictionary.
        :param storage_to_dict: when True returns a dict, otherwise a list(default True)
        :param datetime_to_str: convert datetime fields as strings (default False)
        """
        # temporarily override compact so iteration honors the argument
        (oc, self.compact) = (self.compact, compact)
        if storage_to_dict:
            items = [item.as_dict(datetime_to_str, custom_types) for item in self]
        else:
            items = [item for item in self]
        self.compact = compact
        return items


    def as_dict(self,
                key='id',
                compact=True,
                storage_to_dict=True,
                datetime_to_str=False,
                custom_types=None):
        """
        returns the data as a dictionary of dictionaries (storage_to_dict=True) or records (False)

        :param key: the name of the field to be used as dict key, normally the id
        :param compact: ? (default True)
        :param storage_to_dict: when True returns a dict, otherwise a list(default True)
        :param datetime_to_str: convert datetime fields as strings (default False)
        """

        # test for multiple rows
        multi = False
        f = self.first()
        if f and isinstance(key, basestring):
            multi = any([isinstance(v, f.__class__) for v in f.values()])
            if (not "." in key) and multi:
                # No key provided, default to int indices
                def new_key():
                    i = 0
                    while True:
                        yield i
                        i += 1
                key_generator = new_key()
                key = lambda r: key_generator.next()

        rows = self.as_list(compact, storage_to_dict, datetime_to_str, custom_types)
        if isinstance(key,str) and key.count('.')==1:
            (table, field) = key.split('.')
            return dict([(r[table][field],r) for r in rows])
        elif isinstance(key,str):
            return dict([(r[key],r) for r in rows])
        else:
            return dict([(key(r),r) for r in rows])


    def as_trees(self, parent_name='parent_id', children_name='children'):
        # build parent/child trees; roots are rows whose parent is None
        roots = []
        drows = {}
        for row in self:
            drows[row.id] = row
            row[children_name] = []
        for row in self:
            parent = row[parent_name]
            if parent is None:
                roots.append(row)
            else:
                drows[parent][children_name].append(row)
        return roots

    def export_to_csv_file(self, ofile, null='<NULL>', *args, **kwargs):
        """
        export data to csv, the first line contains the column names

        :param ofile: where the csv must be exported to
        :param null: how null values must be represented (default '<NULL>')
        :param delimiter: delimiter to separate values (default ',')
        :param quotechar: character to use to quote string values (default '"')
        :param quoting: quote system, use csv.QUOTE_*** (default csv.QUOTE_MINIMAL)
        :param represent: use the fields .represent value (default False)
        :param colnames: list of column names to use (default self.colnames)
        This will only work when exporting rows objects!!!!
        DO NOT use this with db.export_to_csv()
        """
        delimiter = kwargs.get('delimiter', ',')
        quotechar = kwargs.get('quotechar', '"')
        quoting = kwargs.get('quoting', csv.QUOTE_MINIMAL)
        represent = kwargs.get('represent', False)
        writer = csv.writer(ofile, delimiter=delimiter,
                            quotechar=quotechar, quoting=quoting)
        colnames = kwargs.get('colnames', self.colnames)
        write_colnames = kwargs.get('write_colnames',True)
        # a proper csv starting with the column names
        if write_colnames:
            writer.writerow(colnames)

        def none_exception(value):
            """
            returns a cleaned up value that can be used for csv export:
            - unicode text is encoded as such
            - None values are replaced with the given representation (default <NULL>)
            """
            if value is None:
                return null
            elif isinstance(value, unicode):
                return value.encode('utf8')
            elif isinstance(value,Reference):
                return long(value)
            elif hasattr(value, 'isoformat'):
                return value.isoformat()[:19].replace('T', ' ')
            elif isinstance(value, (list,tuple)): # for type='list:..'
                return bar_encode(value)
            return value

        for record in self:
            row = []
            for col in colnames:
                # columns not in 'table.field' form come from _extra
                if not REGEX_TABLE_DOT_FIELD.match(col):
                    row.append(record._extra[col])
                else:
                    (t, f) = col.split('.')
                    field = self.db[t][f]
                    if isinstance(record.get(t, None), (Row,dict)):
                        value = record[t][f]
                    else:
                        value = record[f]
                    if field.type=='blob' and not value is None:
                        value = base64.b64encode(value)
                    elif represent and field.represent:
                        value = field.represent(value)
                    row.append(none_exception(value))
            writer.writerow(row)

    def xml(self,strict=False,row_name='row',rows_name='rows'):
        """
        serializes the table using sqlhtml.SQLTABLE (if present)
        """

        if strict:
            ncols = len(self.colnames)
            return '<%s>\n%s\n</%s>' % (rows_name,
                '\n'.join(row.as_xml(row_name=row_name,
                                     colnames=self.colnames) for
                          row in self), rows_name)

        import sqlhtml
        return sqlhtml.SQLTABLE(self).xml()

    def as_xml(self,row_name='row',rows_name='rows'):
        return self.xml(strict=True, row_name=row_name, rows_name=rows_name)

    def as_json(self, mode='object', default=None):
        """
        serializes the rows to a JSON list or object with objects
        mode='object' is not implemented (should return a nested
        object structure)
        """

        items = [record.as_json(mode=mode, default=default,
                                serialize=False,
                                colnames=self.colnames) for
                 record in self]

        if have_serializers:
            return serializers.json(items,
                                    default=default or
                                    serializers.custom_json)
        elif simplejson:
            return simplejson.dumps(items)
        else:
            raise RuntimeError("missing simplejson")

    # for consistent naming yet backwards compatible
    as_csv = __str__
    json = as_json
################################################################################
# dummy function used to define some doctests
################################################################################

def test_all():
    """
    Dummy function; its docstring holds the module's doctest suite.

    >>> if len(sys.argv)<2: db = DAL("sqlite://test.db")
    >>> if len(sys.argv)>1: db = DAL(sys.argv[1])
    >>> tmp = db.define_table('users',\
              Field('stringf', 'string', length=32, required=True),\
              Field('booleanf', 'boolean', default=False),\
              Field('passwordf', 'password', notnull=True),\
              Field('uploadf', 'upload'),\
              Field('blobf', 'blob'),\
              Field('integerf', 'integer', unique=True),\
              Field('doublef', 'double', unique=True,notnull=True),\
              Field('jsonf', 'json'),\
              Field('datef', 'date', default=datetime.date.today()),\
              Field('timef', 'time'),\
              Field('datetimef', 'datetime'),\
              migrate='test_user.table')

   Insert a field

    >>> db.users.insert(stringf='a', booleanf=True, passwordf='p', blobf='0A',\
                       uploadf=None, integerf=5, doublef=3.14,\
                       jsonf={"j": True},\
                       datef=datetime.date(2001, 1, 1),\
                       timef=datetime.time(12, 30, 15),\
                       datetimef=datetime.datetime(2002, 2, 2, 12, 30, 15))
    1

    Drop the table

    >>> db.users.drop()

    Examples of insert, select, update, delete

    >>> tmp = db.define_table('person',\
              Field('name'),\
              Field('birth','date'),\
              migrate='test_person.table')
    >>> person_id = db.person.insert(name='Marco',birth='2005-06-22')
    >>> person_id = db.person.insert(name='Massimo',birth='1971-12-21')

    commented len(db().select(db.person.ALL))
    commented 2

    >>> me = db(db.person.id==person_id).select()[0] # test select
    >>> me.name
    'Massimo'
    >>> db.person[2].name
    'Massimo'
    >>> db.person(2).name
    'Massimo'
    >>> db.person(name='Massimo').name
    'Massimo'
    >>> db.person(db.person.name=='Massimo').name
    'Massimo'
    >>> row = db.person[2]
    >>> row.name == row['name'] == row['person.name'] == row('person.name')
    True
    >>> db(db.person.name=='Massimo').update(name='massimo') # test update
    1
    >>> db(db.person.name=='Marco').select().first().delete_record() # test delete
    1

    Update a single record

    >>> me.update_record(name="Max")
    <Row {'name': 'Max', 'birth': datetime.date(1971, 12, 21), 'id': 2}>
    >>> me.name
    'Max'

    Examples of complex search conditions

    >>> len(db((db.person.name=='Max')&(db.person.birth<'2003-01-01')).select())
    1
    >>> len(db((db.person.name=='Max')&(db.person.birth<datetime.date(2003,01,01))).select())
    1
    >>> len(db((db.person.name=='Max')|(db.person.birth<'2003-01-01')).select())
    1
    >>> me = db(db.person.id==person_id).select(db.person.name)[0]
    >>> me.name
    'Max'

    Examples of search conditions using extract from date/datetime/time

    >>> len(db(db.person.birth.month()==12).select())
    1
    >>> len(db(db.person.birth.year()>1900).select())
    1

    Example of usage of NULL

    >>> len(db(db.person.birth==None).select()) ### test NULL
    0
    >>> len(db(db.person.birth!=None).select()) ### test NULL
    1

    Examples of search conditions using lower, upper, and like

    >>> len(db(db.person.name.upper()=='MAX').select())
    1
    >>> len(db(db.person.name.like('%ax')).select())
    1
    >>> len(db(db.person.name.upper().like('%AX')).select())
    1
    >>> len(db(~db.person.name.upper().like('%AX')).select())
    0

    orderby, groupby and limitby

    >>> people = db().select(db.person.name, orderby=db.person.name)
    >>> order = db.person.name|~db.person.birth
    >>> people = db().select(db.person.name, orderby=order)

    >>> people = db().select(db.person.name, orderby=db.person.name, groupby=db.person.name)

    >>> people = db().select(db.person.name, orderby=order, limitby=(0,100))

    Example of one 2 many relation

    >>> tmp = db.define_table('dog',\
              Field('name'),\
              Field('birth','date'),\
              Field('owner',db.person),\
              migrate='test_dog.table')
    >>> db.dog.insert(name='Snoopy', birth=None, owner=person_id)
    1

    A simple JOIN

    >>> len(db(db.dog.owner==db.person.id).select())
    1

    >>> len(db().select(db.person.ALL, db.dog.name,left=db.dog.on(db.dog.owner==db.person.id)))
    1

    Drop tables

    >>> db.dog.drop()
    >>> db.person.drop()

    Example of many 2 many relation and Set

    >>> tmp = db.define_table('author', Field('name'),\
              migrate='test_author.table')
    >>> tmp = db.define_table('paper', Field('title'),\
              migrate='test_paper.table')
    >>> tmp = db.define_table('authorship',\
              Field('author_id', db.author),\
              Field('paper_id', db.paper),\
              migrate='test_authorship.table')
    >>> aid = db.author.insert(name='Massimo')
    >>> pid = db.paper.insert(title='QCD')
    >>> tmp = db.authorship.insert(author_id=aid, paper_id=pid)

    Define a Set

    >>> authored_papers = db((db.author.id==db.authorship.author_id)&(db.paper.id==db.authorship.paper_id))
    >>> rows = authored_papers.select(db.author.name, db.paper.title)
    >>> for row in rows: print row.author.name, row.paper.title
    Massimo QCD

    Example of search condition using belongs

    >>> set = (1, 2, 3)
    >>> rows = db(db.paper.id.belongs(set)).select(db.paper.ALL)
    >>> print rows[0].title
    QCD

    Example of search condition using nested select

    >>> nested_select = db()._select(db.authorship.paper_id)
    >>> rows = db(db.paper.id.belongs(nested_select)).select(db.paper.ALL)
    >>> print rows[0].title
    QCD

    Example of expressions

    >>> mynumber = db.define_table('mynumber', Field('x', 'integer'))
    >>> db(mynumber).delete()
    0
    >>> for i in range(10): tmp = mynumber.insert(x=i)
    >>> db(mynumber).select(mynumber.x.sum())[0](mynumber.x.sum())
    45

    >>> db(mynumber.x+2==5).select(mynumber.x + 2)[0](mynumber.x + 2)
    5

    Output in csv

    >>> print str(authored_papers.select(db.author.name, db.paper.title)).strip()
    author.name,paper.title\r
    Massimo,QCD

    Delete all leftover tables

    >>> DAL.distributed_transaction_commit(db)

    >>> db.mynumber.drop()
    >>> db.authorship.drop()
    >>> db.author.drop()
    >>> db.paper.drop()
    """
11166 ################################################################################ 11167 # deprecated since the new DAL; here only for backward compatibility 11168 ################################################################################ 11169 11170 SQLField = Field 11171 SQLTable = Table 11172 SQLXorable = Expression 11173 SQLQuery = Query 11174 SQLSet = Set 11175 SQLRows = Rows 11176 SQLStorage = Row 11177 SQLDB = DAL 11178 GQLDB = DAL 11179 DAL.Field = Field # was necessary in gluon/globals.py session.connect 11180 DAL.Table = Table # was necessary in gluon/globals.py session.connect
################################################################################
# Geodal utils
################################################################################

def geoPoint(x, y):
    """Return a WKT POINT literal for coordinates (x, y)."""
    coords = (x, y)
    return "POINT (%f %f)" % coords
11188
def geoLine(*line):
    """Return a WKT LINESTRING literal from a sequence of (x, y) pairs."""
    pairs = ["%f %f" % point for point in line]
    return "LINESTRING (%s)" % ','.join(pairs)
11191
def geoPolygon(*line):
    """Return a WKT POLYGON literal (single ring) from (x, y) pairs."""
    ring = ','.join("%f %f" % point for point in line)
    return "POLYGON ((%s))" % ring
################################################################################
# run tests
################################################################################

if __name__ == '__main__':
    # run the doctests embedded in this module (see test_all's docstring)
    import doctest
    doctest.testmod()